casperse

Members
  • Posts

    447
  • Joined

  • Last visited

Everything posted by casperse

  1. So I found a Unraid plugin in the stack for permissions But when I add it I get error: Pre-processing - Set file permissions for UnRaid Plugin error! Error: Command failed: chown nobody:users I remember in the docker resilio-sync you could do a: Any input on how to get this working? Thanks!
  2. rootfs / rootfs rw,size=32739196k,nr_inodes=8184799 0 0 proc /proc proc rw,relatime 0 0 sysfs /sys sysfs rw,relatime 0 0 devtmpfs /dev devtmpfs rw,relatime,size=32739204k,nr_inodes=8184801,mode=755 0 0 devpts /dev/pts devpts rw,relatime,gid=5,mode=620,ptmxmode=000 0 0 tmpfs /dev/shm tmpfs rw,relatime 0 0 fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 cgroup_root /sys/fs/cgroup tmpfs rw,relatime,size=8192k,mode=755 0 0 cpuset /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0 cpu /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0 cpuacct /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0 blkio /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0 memory /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0 devices /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 freezer /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0 net_cls /sys/fs/cgroup/net_cls cgroup rw,relatime,net_cls 0 0 perf_event /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0 net_prio /sys/fs/cgroup/net_prio cgroup rw,relatime,net_prio 0 0 hugetlb /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0 pids /sys/fs/cgroup/pids cgroup rw,relatime,pids 0 0 tmpfs /var/log tmpfs rw,relatime,size=131072k,mode=755 0 0 /dev/sda1 /boot vfat rw,noatime,nodiratime,fmask=0177,dmask=0077,codepage=437,iocharset=iso8859-1,shortname=mixed,flush,errors=remount-ro 0 0 /dev/loop0 /lib/modules squashfs ro,relatime 0 0 /dev/loop1 /lib/firmware squashfs ro,relatime 0 0 hugetlbfs /hugetlbfs hugetlbfs rw,relatime,pagesize=2M 0 0 overlay /lib/modules overlay rw,relatime,lowerdir=/lib/modules,upperdir=/var/local/overlay/lib/modules,workdir=/var/local/overlay-work/lib/modules 0 0 overlay /lib/firmware overlay rw,relatime,lowerdir=/lib/firmware,upperdir=/var/local/overlay/lib/firmware,workdir=/var/local/overlay-work/lib/firmware 0 0 rootfs /mnt rootfs rw,size=32739196k,nr_inodes=8184799 0 0 tmpfs /mnt/disks tmpfs rw,relatime,size=1024k 0 0 tmpfs /mnt/remotes tmpfs rw,relatime,size=1024k 0 0 nfsd 
/proc/fs/nfs nfsd rw,relatime 0 0 nfsd /proc/fs/nfsd nfsd rw,relatime 0 0 tmpfs /tmp/EmbyRamScratch tmpfs rw,relatime,size=8388608k 0 0 tmpfs /tmp/PlexRamScratch tmpfs rw,relatime,size=8388608k 0 0 tmpfs /tmp/EmbyRamScratch tmpfs rw,relatime,size=8388608k 0 0 tmpfs /tmp/PlexRamScratch tmpfs rw,relatime,size=8388608k 0 0 /dev/mapper/md1 /mnt/disk1 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md2 /mnt/disk2 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md3 /mnt/disk3 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md4 /mnt/disk4 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md5 /mnt/disk5 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md6 /mnt/disk6 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md7 /mnt/disk7 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md8 /mnt/disk8 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md9 /mnt/disk9 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md10 /mnt/disk10 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md11 /mnt/disk11 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md12 /mnt/disk12 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md13 /mnt/disk13 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md14 /mnt/disk14 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md15 /mnt/disk15 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md17 /mnt/disk17 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md18 /mnt/disk18 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md19 /mnt/disk19 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md20 
/mnt/disk20 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md21 /mnt/disk21 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/md22 /mnt/disk22 xfs rw,noatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/nvme0n1p1 /mnt/cache btrfs rw,noatime,ssd,space_cache=v2,subvolid=5,subvol=/ 0 0 shfs /mnt/user0 fuse.shfs rw,nosuid,nodev,noatime,user_id=0,group_id=0,allow_other 0 0 shfs /mnt/user fuse.shfs rw,nosuid,nodev,noatime,user_id=0,group_id=0,allow_other 0 0 tmpfs /tmp/EmbyRamScratch tmpfs rw,relatime,size=8388608k 0 0 tmpfs /tmp/PlexRamScratch tmpfs rw,relatime,size=8388608k 0 0 /dev/loop2 /var/lib/docker btrfs rw,noatime,ssd,space_cache=v2,subvolid=5,subvol=/ 0 0 /dev/loop2 /var/lib/docker/btrfs btrfs rw,noatime,ssd,space_cache=v2,subvolid=5,subvol=/ 0 0 /dev/loop3 /etc/libvirt btrfs rw,noatime,ssd,space_cache=v2,subvolid=5,subvol=/ 0 0 nsfs /var/run/docker/netns/default nsfs rw 0 0 nsfs /var/run/docker/netns/af0ca5d48ca0 nsfs rw 0 0 nsfs /var/run/docker/netns/c69b5061ae08 nsfs rw 0 0 nsfs /var/run/docker/netns/a2eec510e8c2 nsfs rw 0 0 nsfs /var/run/docker/netns/6d7e60f58521 nsfs rw 0 0 nsfs /var/run/docker/netns/2f86374148d1 nsfs rw 0 0 nsfs /var/run/docker/netns/08afabb265ec nsfs rw 0 0 nsfs /var/run/docker/netns/e56b4af7f70b nsfs rw 0 0 nsfs /var/run/docker/netns/c6f9f26bc3bc nsfs rw 0 0 nsfs /var/run/docker/netns/bea51fff217e nsfs rw 0 0 nsfs /var/run/docker/netns/d331e0d43b95 nsfs rw 0 0 nsfs /var/run/docker/netns/fcc5d03ff157 nsfs rw 0 0 nsfs /var/run/docker/netns/4942e60e1f4d nsfs rw 0 0 nsfs /var/run/docker/netns/80bd94893048 nsfs rw 0 0 nsfs /var/run/docker/netns/c2d0c3ca95d4 nsfs rw 0 0 nsfs /var/run/docker/netns/0ed243b5f45c nsfs rw 0 0 nsfs /var/run/docker/netns/a4af87ec6a28 nsfs rw 0 0 nsfs /var/run/docker/netns/a43c3a83de47 nsfs rw 0 0 /dev/mapper/SEED /mnt/disks/SEED xfs rw,noatime,nodiratime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 /dev/mapper/SEED_NEW /mnt/disks/SEED_NEW xfs 
rw,noatime,nodiratime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0 nsfs /var/run/docker/netns/5da03b4a5235 nsfs rw 0 0 nsfs /var/run/docker/netns/f27c01f97146 nsfs rw 0 0 nsfs /var/run/docker/netns/0596f205e063 nsfs rw 0 0 nsfs /var/run/docker/netns/f5960a6c3a6b nsfs rw 0 0 nsfs /var/run/docker/netns/78640e0fa51f nsfs rw 0 0 nsfs /var/run/docker/netns/dec306ebfaa4 nsfs rw 0 0 nsfs /var/run/docker/netns/2058a66d1423 nsfs rw 0 0
  3. Sure thing! - The mount and drive works perfectly so this is just a UI thing 🙂 diagnostics-20210926-1203.zip
  4. Removed the partition on the new drive and formatted it with XFS encrypted. Did a reboot and now I have the old drive with xfs, xfs again?
  5. Hi Big fan! I just added a new UAD drive that I wanted to replace with a new bigger drive Adding and mounting the drive was no problem I then ran the: rsync -avPX /mnt/disks/SEED/ /mnt/disks/SEED18 Also did a update of any new files: rsync -auv /mnt/disks/SEED/ /mnt/disks/SEED18 I then renamed the new drive to the old drive name and stopped and started the Array And now it looks like I have a partition on the new drive in the size of the old one? But it looked okay before the restart? Also why does the FS for the format says "luks" ? before reboot it was "xfs,xfs"
  6. Happy to hear you found a solution! 🙂 No answers normally means that no one knows the answer to your question😞 You are using Unraid as bare metal for running a Synology NAS with 5 passthrough drives. My setup is much simpler: just a small partition for the Synology apps (100G) and a VM disk for some photo data that I can mirror to the Unraid drives as a backup, so I can restore easily when an upgrade fails (using the VM backup app)
  7. Hi Everyone I have for a very long time used my Quadro P2000 for my dockers (Shared between encoder needs) using passthrough and it works perfectly! But my CPU have a iGP but never managed to get it working with my VM's (Intel® Xeon® E-2176G CPU @ 3.70GHz) I have now read about allot of updates and I can see plugin support making it easier to see all IO and HW specs on Unraid What I am asking is there any "Easy" way to get the iGPU up in the list of passthrough HW on the VM configuration UI webpage? My usage would be to use the iGPU on a VM running adobe for some light video editing My Go file: Do I need to remove the above lines for the iGPU in the GO file now? Dont think I need to add anything to the global conf file? Again I did try to get this working some time ago (Years) but with all the improvements to Unraid it might work now? (Runing the latest and greatest version 6.9.2 Cheers and thanks for all your valuable input and this great forum
  8. Update I tried this: server { listen 443 ssl; listen [::]:443 ssl; server_name maindomain.dk; include /config/nginx/ssl.conf; # add_header X-Frame-Options "SAMEORIGIN" always; add_header Strict-Transport-Security "max-age=15768000; includeSubDomians; preload;"; client_max_body_size 0; location / { include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app nextcloud; set $upstream_port 443; set $upstream_proto https; proxy_pass $upstream_proto://$upstream_app:$upstream_port; proxy_max_temp_file_size 2048m; } # Make a regex exception for `/.well-known` so that clients can still # access it despite the existence of the regex rule location ^~ /.well-known { location = /.well-known/carddav { return 301 /remote.php/dav/; } location = /.well-known/caldav { return 301 /remote.php/dav/; } # Anything else is dynamically handled by Nextcloud location ^~ /.well-known { return 301 /index.php$uri; } try_files $uri $uri/ =404; } } And I got it reduced to this last one: To anyone finding this "webfinger" error Last error is related to cache (In chrome do the following) Open Dev Tools (F12), and while this is open right click on "normal" refresh button on your top left and select Empty cache and hard reload. And all us ok
  9. Configuration change needed after latest Nextcloud update to Nextcloud 21.0.1 Error message: My existing configuration in Swag: server { listen 443 ssl; listen [::]:443 ssl; server_name maindomain.dk; include /config/nginx/ssl.conf; # add_header X-Frame-Options "SAMEORIGIN" always; add_header Strict-Transport-Security "max-age=15768000; includeSubDomians; preload;"; client_max_body_size 0; location / { include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app nextcloud; set $upstream_port 443; set $upstream_proto https; proxy_pass $upstream_proto://$upstream_app:$upstream_port; proxy_max_temp_file_size 2048m; } } I have found different solution on the net that I cant "translate" into my SWAG configuration file location = /.well-known/webfinger { rewrite ^/.well-known/webfinger /public.php?service=webfinger last; } location = /.well-known/nodeinfo { rewrite ^/.well-known/nodeinfo /public.php?service=nodeinfo last; } Or this one: location ^~ /.well-known { location = /.well-known/carddav { return 301 /remote.php/dav/; } location = /.well-known/caldav { return 301 /remote.php/dav/; } # Anything else is dynamically handled by Nextcloud location ^~ /.well-known { return 301 /index.php$uri; } try_files $uri $uri/ =404; } If anyone have this working then it would be great if you could share your configuration file
  10. Hi All I hope someone can help me out? I have successfully had this running with Sonarr & Radarr but now I am moving it to another server where the download path is different. Is it possible to change the default /Data to /downloads/? Looking at Sabnzbd I can see that it says: And it would be easy to just change it in the Docker: The reason I would like to change it is that I have a working setup of "Remote Path Mappings", so I need all download clients to look in the same defined path. Hope this makes sense to someone who might have the same setup?
  11. This is not the XML, it's the HW passthrough of the eth. - I didn't do any manual editing, I just checked [X] the 4 LAN in the UI (Nothing virtual, MAC etc. — clean passthrough of the HW) Space Invader makes a great video on how to get this working (Sorry, the whole family has been down with Corona, so that's why I haven't replied before now 🙂)
  12. Thanks @saarg totally forgot about the capitol letters (The name came from the Docker pull) But still seems like I am missing a connection (If anyone have any input to what I am missing then please give me a shout) Swag is working for all other dockers (And I am getting cert. on all domains and sub domains) The webserver is running and working 192.168.0.6:25568 (This port is also listed below on the docker) The mineos docker creates a minecraft folder in the appdata (Not a mineos folder) don't know if this makes any difference My config file is now: server { listen 443 ssl; listen [::]:443 ssl; server_name map.*; include /config/nginx/ssl.conf; client_max_body_size 0; location / { include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app mineos-node; set $upstream_port 25568; set $upstream_proto http; proxy_pass $upstream_proto://$upstream_app:$upstream_port; } location ~ (/mineos-node)?/api { include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app mineos-node; set $upstream_port 25568; set $upstream_proto http; proxy_pass $upstream_proto://$upstream_app:$upstream_port; } } Keep getting: Kind of stuck here have tried allot of different things but end up breaking Swag for all other containers 😞 UPdate found this: https://gist.github.com/DmitryRendov/1efb672a0733aca5314dc3332d9823ac But this seems to overcomplicate a simple link to port 25568 (In the above example the default port is 8123) OK this works! 
server { listen 443 ssl; listen [::]:443 ssl; server_name map.*; include /config/nginx/ssl.conf; client_max_body_size 0; # enable for ldap auth, fill in ldap details in ldap.conf #include /config/nginx/ldap.conf; location / { proxy_pass http://192.168.0.6:25568; proxy_set_header Host $host; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } location /api/websocket { proxy_pass http://192.168.0.6:25568/api/websocket; proxy_set_header Host $host; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } }
  13. Looking into the logs it dosent even seem like it is reading the file? I have updated it to this: server { listen 443 ssl; listen [::]:443 ssl; server_name map.*; include /config/nginx/ssl.conf; client_max_body_size 0; # enable for ldap auth, fill in ldap details in ldap.conf #include /config/nginx/ldap.conf; location / { # enable the next two lines for http auth #auth_basic "Restricted"; #auth_basic_user_file /config/nginx/.htpasswd; # enable the next two lines for ldap auth #auth_request /auth; #error_page 401 =200 /login; include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app MineOS-node; set $upstream_port 25568; set $upstream_proto http; proxy_pass $upstream_proto://$upstream_app:$upstream_port; } location ~ (/MineOS-node)?/api { include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app MineOS-node; set $upstream_port 25568; set $upstream_proto http; proxy_pass $upstream_proto://$upstream_app:$upstream_port; } } Should the location ~ (/MineOS-node)?/api { Be replaced with the local IP:PORT? sorry I have 8 different dockers working with SWAG but this one eludes me
  14. Hi All Not sure this is possible using SWAG but here it goes... I have installed the MineOS docker for Minecraft servers and on one of these servers I have a webserver running "Dynmap" rendering a full map of the Minecraft server (Since this is just a view I would like this accessible on the web) But when I point to the "MineOS" which also have a webser UI !! I cant connect to the Dynmap on port 25568 Any way to use swag without pointing to a specific docker? So far I have used this conf for my sub domian map.domain.com and I can see in the log that the cert. is OK server { listen 443 ssl; listen [::]:443 ssl; server_name map.*; include /config/nginx/ssl.conf; client_max_body_size 0; location / { include /config/nginx/proxy.conf; resolver 127.0.0.11 valid=30s; set $upstream_app MineOS-node; set $upstream_port 25568; set $upstream_proto http; proxy_pass $upstream_proto://$upstream_app:$upstream_port; proxy_set_header Range $http_range; proxy_set_header If-Range $http_if_range; } }
  15. The 36xx Models have 4 Intel Ethernet cards so I ended up dedicating such a card as a HW passthrough and it works perfectly I had trouble when I only used one of my MB ethernet I now have speed and connectivity like on my Unraid
  16. I still haven't managed to get past version "DSM 6.2.3-25426 Update 3" — if anyone has it running on 6.9 I would like to know how?
  17. So you created your own Docker? or did you install it as a VM like me? I found that even if I got the Docker working and could access the UI allot of the plugins would NOT work! (They needed the fixed IP, which I never could get working on my Unraid server) There seem to be many problem with running HOOBS or Homebridge in a Docker Another thing people have to fix is the FFMPEG codec path to get Cameras working like this: The VM setup just works Update: I now have both HOOBS and Homebridge running as VM's and all my plugins work in Homebridge (I even like the UI better than HOOBS!) HOOBS is still Hit and miss on the configuration the UI on Homebridge will tell you more about inputting the correct information as examples
  18. Wow this is really fast and better scaling than the Docker (More Icons on the web UI) and fixed IP is easy since it now has a MAC and I can just do a DHCP reservation for this (I spend all night trying to get the docker working) and it took 10 min to create the VM For anyone else who wants to try this in a VM instead of Docker on Unraid: Download Fedora ISO: Fedora-Server-dvd-x86_64-33-1.2.iso Install Fedora on Unraid as VM: (I need to update the icon for HOOBS 🙂 ) Click create and go to your VNC Fill out everything ("X" SSH) Install and wait Instruction after this is Copy paste from here LINK Update Fedora and go for Coffee Set Hostname Install Avahi Enable Avahi Now reboot If you do not want to enter your password in the command line, you can setup sudo to not prompt for a password. Nano comes shipped with Fedora. You will need to edit your sudoers file. By default your admin account is part of the wheel group. This is the entry you need to change. Look for this line in the file. %wheel ALL=(ALL) ALL And change it to this. To exit nano, press ctrl+x, then y and then enter to save. Now you can start HOOBS without a password. Now we are ready to install HOOBS. Wait and access HOOBS wit a fixed IP on http://[IP]/ Much Better with the icon :
  19. Nice to know that other people are struggling too... I think I will try to create a VM instead. I did this with Home Assistant and it's rock solid! It would also eliminate the IP problems... and if I do transcoding then this might be better?
  20. Did you find a solution to the HOOBS problem? I am struggling with the same problem: https://forums.unraid.net/topic/89206-support-malvarez00-hoobs/?do=findComment&comment=933413
  21. I can see the new fixed IP for the docker on my router (UDM) But I can not access the docker UI? (I set the port to 8282) I don't know if it's the docker (Hoobs) or my Unraid setup with Br0?
  22. Update starting from scratch....Even if I can connect to the UI and install plugins I can not connect to HOOBS as a bridge without a fixed IP? I really need some help getting this running with a fixed IP? Can anyone help me? So far the "Custom: br0" didn't work I couldn't connect to the IP I also tried using default port 8080 on the fixed IP but no success Funny I am running +20 dockers and 5 VMs but this HOOBS docker has me beaten 😫 Update do I really need to enable this... in order to get this Docker working? no other way?
  23. I just did a second install; it's all running but the UI is still unreachable? I even set a fixed http port (That worked before?) In the logfile I can see that it is running fine? So I can't install a new one, and I can't repair the old one? Something is preventing the WebUI from working? (And yes, it was buggy to get working for some of us — just read this thread... but I had it working)
  24. Anyone with something to try? I can see in the log that its running....fine But the WebUI is unreachable all the other dockers is fine. I even tried to do a reboot of the Unraid server (Desperate, been running for some months now) different browsers, different PC's