nginx scrapyd access authentication – address already in use

Two commands for checking which process holds a port, sudo netstat -ntpl and sudo netstat -tulpn:

(boss) [chen@VM_0_3_centos conf.d]$ sudo netstat -ntpl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      5013/nginx: worker  
tcp        0      0 0.0.0.0:6801            0.0.0.0:*               LISTEN      5013/nginx: worker  
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      22677/sshd          
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1310/master         
tcp6       0      0 :::80                   :::*                    LISTEN      5013/nginx: worker  
tcp6       0      0 ::1:25                  :::*                    LISTEN      1310/master         
tcp6       0      0 :::9000                 :::*                    LISTEN      29245/dockerd       
tcp6       0      0 :::2377                 :::*                    LISTEN      29245/dockerd       
tcp6       0      0 :::7946                 :::*                    LISTEN      29245/dockerd

(boss) [chen@VM_0_3_centos conf.d]$ sudo netstat -tulpn
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:6800          0.0.0.0:*               LISTEN      5237/python         
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      22677/sshd          
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1310/master         
tcp6       0      0 ::1:25                  :::*                    LISTEN      1310/master         
tcp6       0      0 :::9000                 :::*                    LISTEN      29245/dockerd       
tcp6       0      0 :::2377                 :::*                    LISTEN      29245/dockerd       
tcp6       0      0 :::7946                 :::*                    LISTEN      29245/dockerd       
udp        0      0 0.0.0.0:4789            0.0.0.0:*                           -                   
udp        0      0 0.0.0.0:68              0.0.0.0:*                           927/dhclient        
udp        0      0 172.16.0.3:123          0.0.0.0:*                           4636/ntpd           
udp        0      0 127.0.0.1:123           0.0.0.0:*                           4636/ntpd           
udp        0      0 0.0.0.0:514             0.0.0.0:*                           27343/rsyslogd      
udp6       0      0 :::7946                 :::*                                29245/dockerd       
udp6       0      0 fe80::5054:ff:fe36::123 :::*                                4636/ntpd           
udp6       0      0 ::1:123                 :::*                                4636/ntpd           
udp6       0      0 :::514                  :::*                                27343/rsyslogd  

For technical problems Stack Overflow is still the most reliable resource: https://stackoverflow.com/questions/42303401/nginx-will-not-start-address-already-in-use
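In this case the first listing shows an old nginx worker still bound to port 6801, which is why a fresh nginx could not bind it. A minimal sketch of locating and releasing a busy port (assuming ss from iproute2 and fuser from psmisc are installed):

# Which process owns port 6801? (same information as the netstat output above)
sudo ss -tlnp 'sport = :6801'
sudo fuser -v 6801/tcp

# Stop the stale nginx instance gracefully, then start it again
sudo nginx -s quit        # or: sudo systemctl stop nginx
sudo systemctl start nginx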

The nginx reverse-proxy configuration for scrapyd access authentication:

# For more information on configuration, see:
#   * Official English Documentation: http://nginx.org/en/docs/
#   * Official Russian Documentation: http://nginx.org/ru/docs/

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;

# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;
    server {
        listen 6801;
        server_name localhost;
        #charset koi8-r;
        #access_log logs/host.access.log main;
        location / {
            proxy_pass http://127.0.0.1:6800/;
            auth_basic "Restricted";
            auth_basic_user_file /etc/nginx/conf.d/pswdScrapyd;
        }
    }

# Settings for a TLS enabled server.
#
#    server {
#        listen       443 ssl http2 default_server;
#        listen       [::]:443 ssl http2 default_server;
#        server_name  _;
#        root         /usr/share/nginx/html;
#
#        ssl_certificate "/etc/pki/nginx/server.crt";
#        ssl_certificate_key "/etc/pki/nginx/private/server.key";
#        ssl_session_cache shared:SSL:1m;
#        ssl_session_timeout  10m;
#        ssl_ciphers HIGH:!aNULL:!MD5;
#        ssl_prefer_server_ciphers on;
#
#        # Load configuration files for the default server block.
#        include /etc/nginx/default.d/*.conf;
#
#        location / {
#        }
#
#        error_page 404 /404.html;
#        location = /404.html {
#        }
#
#        error_page 500 502 503 504 /50x.html;
#        location = /50x.html {
#        }
#    }

}
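After editing, validate the configuration and reload the running master; starting a second master by hand is exactly what produces the address-already-in-use error:

sudo nginx -t
sudo nginx -s reload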

The project's scrapy.cfg needs a [deploy] section added:

[deploy]
url = http://127.0.0.1:6801/
project = jingdong
username = user
password = password

The username and password here are the ones generated by htpasswd into the file referenced by auth_basic_user_file above, e.g. sudo htpasswd -c /etc/nginx/conf.d/pswdScrapyd user (it prompts for the password).
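A quick sanity check of the proxy, using the user/password pair from the [deploy] section above: the unauthenticated request should return 401, the authenticated one should reach scrapyd:

curl -I http://127.0.0.1:6801/
curl -I -u user:password http://127.0.0.1:6801/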

scrapy process killed for no apparent reason (the kdevtmpfsi virus exploiting scrapyd to plant a mining program)

Summary first: when Redis could not allocate memory, the system's OOM killer selectively killed the scrapy process. At first I thought I had found the fix by setting a maximum memory for Redis and allowing memory overcommit. Only later did I discover that the notorious kdevtmpfsi virus had exploited the scrapyd vulnerability to infect all three of my servers. I have to admit I let my guard down when installing scrapyd: I exposed its port directly to the public internet without realizing the service has no authentication at all, so anyone could run crawler jobs on my servers. Follow this post to clean it up: https://blog.csdn.net/weixin_39334709/article/details/109135642 , and be sure to delete the cron jobs it installs.
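For reference, the rough shape of the cleanup (a sketch; /tmp/kdevtmpfsi and /var/tmp/kinsing are the commonly reported paths for this malware, so verify against the linked post):

# Find the miner and its watchdog process, then kill both
ps aux | grep -E 'kdevtmpfsi|kinsing'
sudo kill -9 <pid>                       # repeat for each of the two PIDs
sudo rm -f /tmp/kdevtmpfsi /var/tmp/kinsing
# Crucially, remove the cron entry that re-downloads the miner
crontab -l                               # look for a "curl ... | sh" style line
crontab -e                               # and delete it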

It looks simple, but it actually took me two days to track down, with tools like strace and py-spy, plus of course Google and Stack Overflow. My initial theory was that since the spider uses Redis for deduplication and I had never crawled at such a high rate before, the pressure had simply never been high enough to surface a problem, which seemed to explain why, as shown below, scrapy got killed on Linux but ran fine on Windows.

After some change to a setting or the code, scrapy was being killed out of nowhere. Strangely, there were no logs at all (nothing relevant under /var/log); the terminal just printed Killed. That output looks exactly like a kill -9, except I never ran that command. py-spy can show resource usage, but it exits as soon as the traced process dies, so it was no help here. To make things worse, the script was only killed on Linux; on Windows it ran without any problem, which was baffling.

The strace capture is below. It sometimes catches the shutdown() calls being issued, but the surrounding output gives no clue as to where they come from:

18:04:59.880446 connect(267, {sa_family=AF_INET, sin_port=htons(3389), sin_addr=inet_addr("114.104.128.151")}, 16) = -1 EINPROGRESS (Operation now in progress) <0.000043>
18:04:59.880533 epoll_ctl(5, EPOLL_CTL_ADD, 267, {EPOLLIN, {u32=267, u64=140496970187019}}) = 0 <0.000010>
18:04:59.880574 epoll_ctl(5, EPOLL_CTL_MOD, 267, {EPOLLIN|EPOLLOUT, {u32=267, u64=140496970187019}}) = 0 <0.000006>
18:04:59.880618 getsockopt(435, SOL_SOCKET, SO_ERROR, [0], [4]) = 0 <0.000007>
18:04:59.880650 connect(435, {sa_family=AF_INET, sin_port=htons(3389), sin_addr=inet_addr("114.104.128.151")}, 16) = -1 EINPROGRESS (Operation now in progress) <0.000021>
18:04:59.880701 epoll_ctl(5, EPOLL_CTL_ADD, 435, {EPOLLIN, {u32=435, u64=140496970187187}}) = 0 <0.000007>
18:04:59.880735 epoll_ctl(5, EPOLL_CTL_MOD, 435, {EPOLLIN|EPOLLOUT, {u32=435, u64=140496970187187}}) = 0 <0.000007>
18:04:59.880834 epoll_wait(5, [{EPOLLOUT, {u32=493, u64=140496970187245}}, {EPOLLOUT, {u32=495, u64=140496970187247}}, {EPOLLOUT, {u32=483, u64=140496970187235}}, {EPOLLOUT, {u32=484, u64=140496970187236}}, {EPOLLIN, {u32=496, u64=140496970187248}}], 629, 0) = 5 <0.000013>
18:04:59.880931 sendto(493, "\27\3\3\0\23\223\316,\360\247K\355\302v{\312\r\327\271\377!\212\222q", 24, 0, NULL, 0) = 24 <0.000019>
18:04:59.881084 epoll_ctl(5, EPOLL_CTL_DEL, 493, 0x7ffd97c330cc) = 0 <0.000009>
18:04:59.881173 shutdown(493, SHUT_RDWR) = 0 <0.000016>
18:04:59.881213 close(493) = 0 <0.000012>
18:04:59.881372 sendto(495, "\27\3\3\0\23\22\302H\334\320\f\f\3348\352$\376,\261\254\277\317P\361", 24, 0, NULL, 0) = 24 <0.000018>
18:04:59.881431 epoll_ctl(5, EPOLL_CTL_DEL, 495, 0x7ffd97c330cc) = 0 <0.000008>
18:04:59.881504 shutdown(495, SHUT_RDWR) = 0 <0.000014>
18:04:59.881541 close(495) = 0 <0.000008>
18:04:59.881642 sendto(483, "\27\3\3\1+n\34\346\24\271/W\274q\312$\227\224}\302\25\22)\310\350M0j\375p\10Z"…, 304, 0, NULL, 0) = 304 <0.000016>
18:04:59.881699 epoll_ctl(5, EPOLL_CTL_MOD, 483, {EPOLLIN, {u32=483, u64=140496970187235}}) = 0 <0.000009>
18:04:59.881755 sendto(484, "\27\3\3\1-\335\330:\223E\277\31\244w\202\220\t\310\200\363o6?\346V\267\273&\35]\242z"…, 306, 0, NULL, 0) = 306 <0.000032>
18:04:59.881825 epoll_ctl(5, EPOLL_CTL_MOD, 484, {EPOLLIN, {u32=484, u64=140496970187236}}) = 0 <0.000007>
18:04:59.881881 recvfrom(496, "\26\3\3\0z\2\0\0v\3\3}\345\315\324\272\261\t\245O;\375\226\223\4+\313\332\v\\250+"…, 65536, 0, NULL, NULL) = 2776 <0.000011>
18:04:59.883097 write(1, "{'_id': '12331351',\n 'product_ad"…, 1985) = 1985 <0.000032>
18:04:59.883166 write(1, "\n", 1) = 1 <0.000007>
18:04:59.886004 epoll_wait(5, [], 627, 0) = 0 <0.000008>
18:04:59.886065 epoll_wait(5, [], 627, 18) = 0 <0.018731>
18:04:59.905986 epoll_wait(5, [], 627, 0) = 0 <0.000007>
18:04:59.906621 ioctl(14, FIONBIO, [1]) = 0 <0.000007>
18:04:59.906667 recvfrom(14, 0x55ed7f72e4d0, 65536, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable) <0.000007>
18:04:59.906719 ioctl(14, FIONBIO, [1]) = 0 <0.000006>
18:04:59.906780 poll([{fd=14, events=POLLOUT}], 1, 120000) = 1 ([{fd=14, revents=POLLOUT}]) <0.000008>
18:04:59.906829 sendto(14, "3\r\n$4\r\nSADD\r\n$20\r\nproduct_1:dup"…, 88, 0, NULL, 0) = 88 <0.000019>
18:04:59.906895 poll([{fd=14, events=POLLIN}], 1, 120000) = 1 ([{fd=14, revents=POLLIN}]) <0.003565>
18:04:59.910493 recvfrom(14, ":1\r\n", 65536, 0, NULL, NULL) = 4 <0.000008>
18:04:59.910807 ioctl(14, FIONBIO, [1]) = 0 <0.000007>
18:04:59.910845 recvfrom(14, 0x55ed7f72e4d0, 65536, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable) <0.000006>
18:04:59.910886 ioctl(14, FIONBIO, [1]) = 0 <0.000006>
18:04:59.910945 poll([{fd=14, events=POLLOUT}], 1, 120000) = 1 ([{fd=14, revents=POLLOUT}]) <0.000007>
18:04:59.910977 sendto(14, "4\r\n$4\r\nZADD\r\n$18\r\nproduct_1:req"…, 1671, 0, NULL, 0) = 1671 <0.000024>
18:04:59.911041 poll([{fd=14, events=POLLIN}], 1, 120000) = 1 ([{fd=14, revents=POLLIN}]) <0.002541>
18:04:59.913617 recvfrom(14, ":1\r\n", 65536, 0, NULL, NULL) = 4 <0.000009>
18:04:59.915223 epoll_wait(5, [], 627, 0) = 0 <0.000007>
18:04:59.916142 epoll_wait(5, [], 627, 0) = 0 <0.000007>
18:04:59.916677 ioctl(14, FIONBIO, [1]) = 0 <0.000007>
18:04:59.916719 recvfrom(14, 0x55ed7f72e4d0, 65536, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable) <0.000007>
18:04:59.916763 ioctl(14, FIONBIO, [1]) = 0 <0.000006>
18:04:59.916831 poll([{fd=14, events=POLLOUT}], 1, 120000) = 1 ([{fd=14, revents=POLLOUT}]) <0.000007>
18:04:59.916865 sendto(14, "3\r\n$4\r\nSADD\r\n$20\r\nproduct_1:dup"…, 88, 0, NULL, 0) = 88 <0.000018>
18:04:59.916922 poll([{fd=14, events=POLLIN}], 1, 120000) = 1 ([{fd=14, revents=POLLIN}]) <0.002686>
18:04:59.919641 recvfrom(14, ":1\r\n", 65536, 0, NULL, NULL) = 4 <0.000008>
18:04:59.919924 ioctl(14, FIONBIO, [1]) = 0 <0.000007>
18:04:59.919960 recvfrom(14, 0x55ed7f72e4d0, 65536, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable) <0.000006>
18:04:59.920001 ioctl(14, FIONBIO, [1]) = 0 <0.000006>
18:04:59.920058 poll([{fd=14, events=POLLOUT}], 1, 120000) = 1 ([{fd=14, revents=POLLOUT}]) <0.000007>
18:04:59.920089 sendto(14, "4\r\n$4\r\nZADD\r\n$18\r\nproduct_1:req"…, 1678, 0, NULL, 0) = 1678 <0.000023>
18:04:59.920151 poll([{fd=14, events=POLLIN}], 1, 120000) = 1 ([{fd=14, revents=POLLIN}]) <0.002500>
18:04:59.922685 recvfrom(14, ":1\r\n", 65536, 0, NULL, NULL) = 4 <0.000007>
18:04:59.924295 epoll_wait(5, [], 627, 0) = 0 <0.000007>
18:04:59.924357 epoll_wait(5, [{EPOLLIN, {u32=496, u64=140496970187248}}], 627, 803) = 1 <0.251454>
18:05:00.175939 recvfrom(496, "\350\241\"6V\377o5@\30\20\34\351z\277i\347\374\213\255:\236)\352\205\343\336\25\35@x\310"…, 65536, 0, NULL, NULL) = 1216 <0.000010>
18:05:00.180772 epoll_ctl(5, EPOLL_CTL_MOD, 496, {EPOLLIN|EPOLLOUT, {u32=496, u64=140496970187248}}) = 0 <0.000015>
18:05:00.180903 epoll_wait(5, [{EPOLLOUT, {u32=496, u64=140496970187248}}, {EPOLLIN, {u32=369, u64=140496970187121}}], 627, 546) = 2 <0.000008>
18:05:00.180978 sendto(496, "\24\3\3\0\1\1\27\3\3\0E\375\310Y\265\342\223\6\226\301\252\373\356\317\271\26\250otI\316\370"…, 429, 0, NULL, 0) = 429 <0.000020>
18:05:00.181046 epoll_ctl(5, EPOLL_CTL_MOD, 496, {EPOLLIN, {u32=496, u64=140496970187248}}) = 0 <0.000006>
18:05:00.181097 recvfrom(369, "\27\3\3\1\n\2701\2575\335\30\246\350\\370^k&2OB\351Y\27\3679\27\357\23\243\244\322"…, 65536, 0, NULL, NULL) = 542 <0.000008>
18:05:00.181258 epoll_wait(5, [], 627, 546) = 0 <0.546960>
18:05:00.728448 epoll_wait(5, [], 627, 0) = 0 <0.000007>
18:05:00.728541 epoll_wait(5, ) = ?
18:05:01.812104 +++ killed by SIGKILL +++
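To confirm that the crawl really was receiving a signal, and to capture dmesg at that moment, I wrapped it in a small script: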
#!/bin/bash
scrapy crawl product_1
ret=$?
#
#  returns > 127 are a SIGNAL
#
if [ $ret -gt 127 ]; then
        sig=$((ret - 128))
        echo "Got SIGNAL $sig"
        if [ $sig -eq $(kill -l SIGKILL) ]; then
                echo "process was killed with SIGKILL"
                dmesg > $HOME/dmesg-kill.log
        fi
fi

Result:
test_k.sh: line 3: 11181 Killed scrapy crawl product_1
Got SIGNAL 9
process was killed with SIGKILL
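The captured dmesg can then be checked for OOM-killer traces (a sketch; the exact message text varies by kernel version):

grep -iE 'out of memory|oom-killer|killed process' $HOME/dmesg-kill.log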

Let's try this first and disable the OOM killer: https://stackoverflow.com/questions/25000496/python-script-terminated-by-sigkill-rather-than-throwing-memoryerror

You can add these lines to your /etc/sysctl.conf file to effectively disable the OOM killer:

vm.overcommit_memory = 2
vm.overcommit_ratio = 100
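To apply the settings without a reboot (a sketch):

sudo sysctl -w vm.overcommit_memory=2
sudo sysctl -w vm.overcommit_ratio=100
# or, after editing /etc/sysctl.conf:
sudo sysctl -p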

Later I noticed that the scrapy process had 5 threads:

(boss) [chen@VM_0_2_centos jingdong]$ strace -f -v -e trace=signal -tt -p 7585
strace: Process 7585 attached with 5 threads

(boss) [chen@VM_0_2_centos ~]$ ps auxw -T | fgrep 7585
chen      7585  7585 16.4  1.9 634056 158620 pts/0   Sl+  23:13   4:35 /home/chen/anaconda3/envs/boss/bin/python /home/chen/anaconda3/envs/boss/bin/scrapy crawl product_1
chen      7585  7588  0.0  1.9 634056 158620 pts/0   Sl+  23:13   0:00 /home/chen/anaconda3/envs/boss/bin/python /home/chen/anaconda3/envs/boss/bin/scrapy crawl product_1
chen      7585  7589  0.0  1.9 634056 158620 pts/0   Sl+  23:13   0:00 /home/chen/anaconda3/envs/boss/bin/python /home/chen/anaconda3/envs/boss/bin/scrapy crawl product_1
chen      7585  7592  0.0  1.9 634056 158620 pts/0   Sl+  23:13   0:00 /home/chen/anaconda3/envs/boss/bin/python /home/chen/anaconda3/envs/boss/bin/scrapy crawl product_1
chen      7585  7593  0.0  1.9 634056 158620 pts/0   Sl+  23:13   0:00 /home/chen/anaconda3/envs/boss/bin/python /home/chen/anaconda3/envs/boss/bin/scrapy crawl product_1
chen      8584  8584  0.4  0.0 126348  1036 pts/2    S+   23:36   0:01 strace -f -v -e trace=signal -tt -p 7585

This puzzled me: scrapy is single-threaded, merely asynchronous on top of Twisted, so where do 5 threads come from? (In hindsight these are most likely Twisted's thread-pool workers, which the reactor uses for blocking work such as DNS resolution, but I did not dig further.)

https://stackoverflow.com/questions/14694582/stracing-to-attach-to-a-multi-threaded-process

strace -fp <pid> will show the trace of all threads owned by that process – even ones that process already has spawned before strace begins

So let's see what strace -fp 7585 shows. The result matched the earlier guess: it really is related to Redis.

[pid  7585] sendto(17, "*2\r\n$5\r\nHVALS\r\n$4\r\nadsl\r\n", 25, 0, NULL, 0) = 25
[pid  7585] recvfrom(17, "*1\r\n$45\r\nczhen:a8h54edrwg@36.57."..., 65536, 0, NULL, NULL) = 56
[pid  7585] ioctl(17, FIONBIO, [1])     = 0
[pid  7585] recvfrom(17, 0x564aace95f30, 65536, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
[pid  7585] ioctl(17, FIONBIO, [0])     = 0
[pid  7585] sendto(17, "*2\r\n$5\r\nHVALS\r\n$4\r\nadsl\r\n", 25, 0, NULL, 0) = 25
[pid  7585] recvfrom(17, "*1\r\n$45\r\nczhen:a8h54edrwg@36.57."..., 65536, 0, NULL, NULL) = 56
[pid  7585] ioctl(17, FIONBIO, [1])     = 0
[pid  7585] recvfrom(17, 0x564aace95f30, 65536, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
[pid  7585] ioctl(17, FIONBIO, [0])     = 0
[pid  7585] sendto(17, "*2\r\n$5\r\nHVALS\r\n$4\r\nadsl\r\n", 25, 0, NULL, 0) = 25
[pid  7585] recvfrom(17,  <unfinished ...>
[pid  7593] <... select resumed> <unfinished ...>) = ?
[pid  7593] +++ killed by SIGKILL +++
[pid  7592] <... select resumed> <unfinished ...>) = ?
[pid  7589] <... select resumed> <unfinished ...>) = ?
[pid  7588] <... select resumed> <unfinished ...>) = ?
[pid  7585] <... recvfrom resumed> <unfinished ...>) = ?
[pid  7592] +++ killed by SIGKILL +++
[pid  7588] +++ killed by SIGKILL +++
[pid  7589] +++ killed by SIGKILL +++
+++ killed by SIGKILL +++
Unhandled Error
Traceback (most recent call last):
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy/commands/crawl.py", line 27, in run
    self.crawler_process.start()
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy/crawler.py", line 327, in start
    reactor.run(installSignalHandlers=False)  # blocking call
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/twisted/internet/base.py", line 1283, in run
    self.mainLoop()
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/twisted/internet/base.py", line 1292, in mainLoop
    self.runUntilCurrent()
--- <exception caught here> ---
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
    call.func(*call.args, **call.kw)
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy/utils/reactor.py", line 50, in __call__
    return self._func(*self._a, **self._kw)
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy/core/engine.py", line 124, in _next_request
    if not self._next_request_from_scheduler(spider):
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy/core/engine.py", line 151, in _next_request_from_scheduler
    request = slot.scheduler.next_request()
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy_redis/scheduler.py", line 172, in next_request
    request = self.queue.pop(block_pop_timeout)
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/scrapy_redis/queue.py", line 115, in pop
    results, count = pipe.execute()
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/redis/client.py", line 3967, in execute
    return execute(conn, stack, raise_on_error)
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/redis/client.py", line 3863, in _execute_transaction
    self.immediate_execute_command('DISCARD')
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/redis/client.py", line 3789, in immediate_execute_command
    return self.parse_response(conn, command_name, **options)
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/redis/client.py", line 3926, in parse_response
    self, connection, command_name, **options)
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/redis/client.py", line 892, in parse_response
    response = connection.read_response()
  File "/home/chen/anaconda3/envs/boss/lib/python3.7/site-packages/redis/connection.py", line 752, in read_response
    raise response
redis.exceptions.ResponseError: DISCARD without MULTI

The same traceback was then repeated three more times by the other log handlers, and after that:

Killed

Now let's look at the Redis log:

7949:C 18 Dec 23:22:12.386 * RDB: 70 MB of memory used by copy-on-write
1175:M 18 Dec 23:22:12.445 * Background saving terminated with success
1175:M 18 Dec 23:27:13.069 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:27:13.069 * Background saving started by pid 8147
8147:C 18 Dec 23:27:15.698 * DB saved on disk
8147:C 18 Dec 23:27:15.698 * RDB: 26 MB of memory used by copy-on-write
1175:M 18 Dec 23:27:15.787 * Background saving terminated with success
1175:M 18 Dec 23:32:16.092 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:32:16.092 * Background saving started by pid 8426
8426:C 18 Dec 23:32:18.643 * DB saved on disk
8426:C 18 Dec 23:32:18.644 * RDB: 28 MB of memory used by copy-on-write
1175:M 18 Dec 23:32:18.712 * Background saving terminated with success
1175:M 18 Dec 23:37:19.072 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:37:19.073 * Background saving started by pid 8633
8633:C 18 Dec 23:37:21.643 * DB saved on disk
8633:C 18 Dec 23:37:21.644 * RDB: 28 MB of memory used by copy-on-write
1175:M 18 Dec 23:37:21.704 * Background saving terminated with success
1175:M 18 Dec 23:42:22.031 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:42:22.031 * Background saving started by pid 8976
8976:C 18 Dec 23:42:24.615 * DB saved on disk
8976:C 18 Dec 23:42:24.615 * RDB: 26 MB of memory used by copy-on-write
1175:M 18 Dec 23:42:24.658 * Background saving terminated with success
1175:M 18 Dec 23:47:25.029 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:47:25.029 * Background saving started by pid 9177
9177:C 18 Dec 23:47:27.533 * DB saved on disk
9177:C 18 Dec 23:47:27.534 * RDB: 20 MB of memory used by copy-on-write
1175:M 18 Dec 23:47:27.572 * Background saving terminated with success
1175:M 18 Dec 23:52:28.074 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:52:28.074 # Can't save in background: fork: Cannot allocate memory
1175:M 18 Dec 23:52:34.025 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:52:34.025 # Can't save in background: fork: Cannot allocate memory
1175:M 18 Dec 23:52:40.072 * 10 changes in 300 seconds. Saving...
1175:M 18 Dec 23:52:40.073 * Background saving started by pid 9392
9392:C 18 Dec 23:52:42.520 * DB saved on disk
9392:C 18 Dec 23:52:42.520 * RDB: 24 MB of memory used by copy-on-write
1175:M 18 Dec 23:52:42.602 * Background saving terminated with success
1175:M 19 Dec 00:07:43.043 * 1 changes in 900 seconds. Saving...
1175:M 19 Dec 00:07:43.044 * Background saving started by pid 9996
9996:C 19 Dec 00:07:45.624 * DB saved on disk
9996:C 19 Dec 00:07:45.624 * RDB: 16 MB of memory used by copy-on-write
1175:M 19 Dec 00:07:45.661 * Background saving terminated with success

Solution: cap Redis memory at 2gb in redis.conf. A minimal sketch of the change (the maxmemory-policy line is my addition; noeviction is the Redis default and the right choice for scrapy-redis, since it makes writes fail loudly instead of silently evicting queue keys):
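maxmemory 2gb
maxmemory-policy noeviction

Also remember to switch the kernel overcommit settings back to: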

vm.overcommit_memory = 1
vm.overcommit_ratio = 100

The kernel parameter vm.overcommit_memory accepts three values:

  • 0 – Heuristic overcommit handling. This is the default. Overcommit is allowed, but blatantly unreasonable requests are rejected, e.g. a single malloc asking for more than the system's total memory. "Heuristic" means the kernel uses an algorithm to guess whether a memory request is reasonable and refuses the overcommit if it decides it is not (see the end of this post for a detailed explanation of the algorithm).
  • 1 – Always overcommit. Every memory request is granted, no questions asked.
  • 2 – Don't overcommit. Overcommit is forbidden.
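With vm.overcommit_memory = 2 the kernel enforces CommitLimit = swap + overcommit_ratio% of RAM, which is why Redis itself recommends mode 1 so that its fork()-based background saves don't fail. A quick check of the limit against what is currently committed:

grep -E 'CommitLimit|Committed_AS' /proc/meminfo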