Hi!
I have NPM running as a reverse proxy for Nextcloud and other services. It's running fine, but I have problems with the docker.img file filling up. I identified NPM as the culprit because of the proxy_buffer features. Disabling the buffer with "proxy_buffering off" fixes the problem, but NPM then becomes a bottleneck, so I need the buffer on.
I tried mounting /tmp and /var/tmp from NPM to Unraid's /tmp folder for diagnostic purposes, but the docker.img file is still filled to 100%, so the buffers are not stored there.
I also tried the proxy_temp_path directive to force the use of the /tmp folder, but it doesn't work — NPM still writes inside /var/lib/docker.
So, how can I force NPM to buffer outside of the container? Which path do I need to mount?
My config for Nextcloud in NPM is:
{
"id": 1,
"created_on": "2022-05-03 19:42:40",
"modified_on": "2022-05-07 18:31:07",
"owner_user_id": 1,
"domain_names": [
"cloud.mydomain.net"
],
"forward_host": "192.168.10.182",
"forward_port": 7880,
"access_list_id": 0,
"certificate_id": 1,
"ssl_forced": true,
"caching_enabled": true,
"block_exploits": false,
"advanced_config": "proxy_http_version 1.1;\r\n proxy_set_header Upgrade $http_upgrade;\r\n proxy_set_header Connection \"Upgrade\";\r\n proxy_set_header Host $host;\r\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\r\n#proxy_request_buffering off;\r\n#proxy_buffering off;\r\nproxy_buffering on;\r\nproxy_buffers 32 4k;\r\nproxy_max_temp_file_size 2048m;\r\nproxy_temp_file_write_size 32k;\r\nproxy_temp_path /tmp;",
"meta": {
"letsencrypt_agree": false,
"dns_challenge": false
},
"allow_websocket_upgrade": false,
"http2_support": true,
"forward_scheme": "http",
"enabled": 1,
"locations": [
{
"path": "/.well-known/carddav",
"advanced_config": "",
"forward_scheme": "http",
"forward_host": "192.168.10.182/remote.php/dav",
"forward_port": 7880
},
{
"path": "/.well-known/caldav",
"advanced_config": "",
"forward_scheme": "http",
"forward_host": "192.168.10.182/remote.php/dav",
"forward_port": 7880
}
],
"hsts_enabled": true,
"hsts_subdomains": false
}
Thanks 🙂