• Posts

  • Joined

  • Last visited


  • Gender

Recent Profile Visitors

The recent visitors block is disabled and is not being shown to other users.

a12vman's Achievements


Apprentice (3/14)



  1. This Windows 10 VM was working fine before the OS Upgrade 20 6.10.1 but now VM never shows up on the network after I start it. Works fine if I turn off PassThru and use VNC instead. I am using a 750TI. Any ideas? Here is VM Log. -device virtio-blk-pci,bus=pci.0,addr=0x5,drive=libvirt-3-format,id=virtio-disk2,bootindex=1,write-cache=on \ -blockdev '{"driver":"file","filename":"/mnt/user/Backup/Software/Windows10RS4/Windows 10 RS4 Pro SupremeOS Edition (x64) 2018-P2P.iso","node-name":"libvirt-2-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-2-format","read-only":true,"driver":"raw","file":"libvirt-2-storage"}' \ -device ide-cd,bus=ide.0,unit=0,drive=libvirt-2-format,id=ide0-0-0,bootindex=2 \ -blockdev '{"driver":"file","filename":"/mnt/user/Backup/Software/virtio-win-0.1.190-1.iso","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-1-format","read-only":true,"driver":"raw","file":"libvirt-1-storage"}' \ -device ide-cd,bus=ide.0,unit=1,drive=libvirt-1-format,id=ide0-0-1 \ -netdev tap,fd=37,id=hostnet0 \ -device virtio-net,netdev=hostnet0,id=net0,mac=52:54:00:5f:dc:a4,bus=pci.0,addr=0x4 \ -chardev pty,id=charserial0 \ -device isa-serial,chardev=charserial0,id=serial0,index=0 \ -chardev socket,id=charchannel0,fd=35,server=on,wait=off \ -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \ -device usb-tablet,id=input0,bus=usb.0,port=1 \ -audiodev '{"id":"audio1","driver":"none"}' \ -vnc,websocket=5700,audiodev=audio1 \ -k en-us \ -device qxl-vga,id=video0,max_outputs=1,ram_size=67108864,vram_size=67108864,vram64_size_mb=0,vgamem_mb=16,bus=pci.0,addr=0x2 \ -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x6 \ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ -msg timestamp=on char device redirected to /dev/pts/0 (label charserial0) qxl_send_events: spice-server bug: guest 
stopped, ignoring 2022-05-23T17:46:21.564375Z qemu-system-x86_64: terminating on signal 15 from pid 11070 (/usr/sbin/libvirtd) 2022-05-23 17:46:21.795+0000: shutting down, reason=shutdown 2022-05-23 17:47:04.868+0000: starting up libvirt version: 8.2.0, qemu version: 6.2.0, kernel: 5.15.40-Unraid, hostname: MediaTower LC_ALL=C \ PATH=/bin:/sbin:/usr/bin:/usr/sbin \ HOME=/var/lib/libvirt/qemu/domain-3-W10 \ XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-3-W10/.local/share \ XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-3-W10/.cache \ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-3-W10/.config \ /usr/local/sbin/qemu \ -name guest=W10,debug-threads=on \ -S \ -object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/var/lib/libvirt/qemu/domain-3-W10/master-key.aes"}' \ -machine pc-i440fx-5.1,usb=off,dump-guest-core=off,mem-merge=off,memory-backend=pc.ram \ -accel kvm \ -cpu host,migratable=on,hv-time=on,hv-relaxed=on,hv-vapic=on,hv-spinlocks=0x1fff,hv-vendor-id=none,host-cache-info=on,l3-cache=off \ -m 4096 \ -object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":4294967296}' \ -overcommit mem-lock=off \ -smp 4,sockets=1,dies=1,cores=2,threads=2 \ -uuid d07abd87-eb87-d4df-0f35-11c60bebc1c8 \ -display none \ -no-user-config \ -nodefaults \ -chardev socket,id=charmonitor,fd=36,server=on,wait=off \ -mon chardev=charmonitor,id=monitor,mode=control \ -rtc base=localtime \ -no-hpet \ -no-shutdown \ -boot strict=on \ -device ich9-usb-ehci1,id=usb,bus=pci.0,addr=0x7.0x7 \ -device ich9-usb-uhci1,masterbus=usb.0,firstport=0,bus=pci.0,multifunction=on,addr=0x7 \ -device ich9-usb-uhci2,masterbus=usb.0,firstport=2,bus=pci.0,addr=0x7.0x1 \ -device ich9-usb-uhci3,masterbus=usb.0,firstport=4,bus=pci.0,addr=0x7.0x2 \ -device virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x3 \ -blockdev '{"driver":"file","filename":"/mnt/user/domains/W10/vdisk1.img","node-name":"libvirt-3-storage","cache":{"direct":false,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ 
-blockdev '{"node-name":"libvirt-3-format","read-only":false,"cache":{"direct":false,"no-flush":false},"driver":"raw","file":"libvirt-3-storage"}' \ -device virtio-blk-pci,bus=pci.0,addr=0x4,drive=libvirt-3-format,id=virtio-disk2,bootindex=1,write-cache=on \ -blockdev '{"driver":"file","filename":"/mnt/user/Backup/Software/Windows10RS4/Windows 10 RS4 Pro SupremeOS Edition (x64) 2018-P2P.iso","node-name":"libvirt-2-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-2-format","read-only":true,"driver":"raw","file":"libvirt-2-storage"}' \ -device ide-cd,bus=ide.0,unit=0,drive=libvirt-2-format,id=ide0-0-0,bootindex=2 \ -blockdev '{"driver":"file","filename":"/mnt/user/Backup/Software/virtio-win-0.1.190-1.iso","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-1-format","read-only":true,"driver":"raw","file":"libvirt-1-storage"}' \ -device ide-cd,bus=ide.0,unit=1,drive=libvirt-1-format,id=ide0-0-1 \ -netdev tap,fd=37,id=hostnet0 \ -device virtio-net,netdev=hostnet0,id=net0,mac=52:54:00:5f:dc:a4,bus=pci.0,addr=0x2 \ -chardev pty,id=charserial0 \ -device isa-serial,chardev=charserial0,id=serial0,index=0 \ -chardev socket,id=charchannel0,fd=35,server=on,wait=off \ -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \ -device usb-tablet,id=input0,bus=usb.0,port=1 \ -audiodev '{"id":"audio1","driver":"none"}' \ -device vfio-pci,host=0000:06:00.0,id=hostdev0,bus=pci.0,addr=0x5 \ -device vfio-pci,host=0000:06:00.1,id=hostdev1,bus=pci.0,addr=0x6 \ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ -msg timestamp=on char device redirected to /dev/pts/0 (label charserial0)
  2. Yes that was the problem. Surprised that this got past RC status.
  3. I ran the upgrade this morning thru the webui Update OS - big mistake. Flash Drive update completed and prompted for a reboot. Did the reboot, but the server is not available. It's not just that I can't log in to the WebUI — the server is not even on the network. What do I do now? I haven't touched anything on my server; it has been fine for months, and I find it hard to believe that there is some sort of hardware failure after the reboot.
  4. I don't know what I did wrong, but I completely lost my Sonarr Library and my Plex Library. I could have restored, I had 7 days of CA Backups. Instead I changed the AppData Config Path to explicitly use /Cache_NVME/Appdata/Sonarr and the same for Plex. I rebuilt both libraries and all is well. It's a housekeeping task I have been wanting to do for some time. The only downside is I lost all of my viewing history.
  5. I did follow SI-1's process of migrating cache data using the cache settings. Moved appdata from Old Cache to Array Moved appdata from Array to New Cache I think where I went wrong. I didn't update the AppData Config path from /mnt/user/appdata/sonarr to /Cache_SSD/Appdata/sonarr I repointed my containers. Everything seems to be working now. Thanks for your help, not sure how I missed that step!
  6. I added a 2nd Cache Drive several months ago following SI-1's cookbook. My original cache drive is Cache_SSD and my new one is Cache_NVME. I modified my share settings so all folders that I want to cache point to Cache_SSD. Docker, VM, and Appdata are set to Cache_NVME. Everything checked out when I was done all Dockers and VM's were functioning. Just looking around on Krusader and noticed that Cache_SSD still has an AppData folder with a folder for each Docker. I changed the folder Name of Sonarr at this location to Sonarr1. My Sonarr library is now empty(says no series found) even after I renamed it back to Sonarr and restarted the app. The container path for all of my dockers is still set to \Mnt\User\AppData\<Docker Name>, do I need to update these paths? Is there any way to get back my Sonarr Library? I do CA Appdata Backups Daily. Sizes of Appdata: Cache_SSD: 758MB CACHE_NVME: 42GB Thanks, a12vman
  7. Sorry, I should have been more specific. By "Log Looks Normal," I meant to say that the VM Log output is the same whether I boot into Windows XP using VNC (that works) or whether I try to boot using GPU Passthru (that doesn't work). Also, the system log is not revealing anything. Here is what my VM config looks like: mediatower-diagnostics-20220130-0728.zip
  8. Thanks I never thought of checking unofficial driver support. My bigger issue is that I can't even get the VM to boot up when I change the Graphics to the 750ti and start the VM. The VM is running, the logs look normal but it never appears on the network so I can connect via RDP. I ping the Machine Name from a windows client but it's not available. The VM Log looks normal and I don't see anything in the System Log. I have created Windows 7/10/11 Vm's using this card for GPU Passthru and had no issues.
  9. I was able to stand up a Windows XP Pro VM thanks to advice on this forum. I have it working with VNC but want to do GPU Passthru. I have a GTX750ti card. The oldest OS drivers available from Nvidia are for Windows 7 64. I can start the VM with GPU Passthru but it never boots up. Is this too much of an ask (assigning a video card to an OS that predates the graphics card)? I have some XP-era games that I would like to play that won't run on Windows 7. Thanks in Advance, a12vman
  10. I used the Reboot Option from the menu. When the server came back up I got an "Unclean Shutdown" and it started a parity check. What gives? Am I not supposed to use this option?
  11. Never thought about a reboot even though I stopped/started the array- you were right on with that suggestion. Cache disappeared and now I have cache_ssd and cache_nvme as the 2 cache pools. I updated my Krusader config to add paths for cache_nvme and cache_ssd. I updated appdata,domains,and system shares. First set cache to yes to move everything off cache into the array. After mover finished I then update these 3 shares to prefer and set the pool to cache_nvme. After the mover finished I set the cache for all remaining shares to cache_ssd. I stopped Docker. Copied docker.img to cache_nvme, updated docker config to point at cache_nvme for Docker.img. I change the Docker appdata location from /mnt/user/appdata/ to /mnt/cache_nvme/appdata/. I restarted Docker Engine, everything seems fine. Plex is a bit faster than it was before. My issue is that I am seeing a split of appdata for all of my dockers. There is a small(in size) amount of appdata remaining in the old appdata location on cache_ssd(734MB). Appdata on cache_nvme is 43GB. See my Screenshot below of Sonarr and Plex AppData. Why do I have split content and what can I do about it?
  12. I need assistance. I am trying to add a 2nd cache drive (1TB NVME). At present I have a 240GB SSD as my only cache. I renamed my existing cache drive to cache_ssd. I stopped the array, added the NVME, formatted it as BTRFS, and named it cache_nvme. Created a 2nd pool and added cache_nvme to this pool. Started the array. Modified cache settings on shares. I set appdata, system and domains to use Prefer: cache_nvme. I set all other shares to use cache_ssd. My existing appdata is ~45GB; I back it up daily to another server. When I open Krusader and look at \Cache it is only showing 46.1 MB. Something is not right. All my dockers are still working fine (like Plex). When I go into Krusader settings under the cache variable (Host Path) and browse /mnt/ I see folders for Cache, Cache_ssd, and Cache_nvme. I reviewed SI-1's video on cache pools and how to create them. Clearly I missed a step here. How do I move all references from /Cache as far as AppData goes? This makes me extremely nervous.... My end goal is to have all appdata and system on cache_nvme and the rest of shares caching to cache_ssd.
  13. How much reading/writing takes place against Docker.img? Is it just during the start of the Docker or is it continuous?