Jump to content

giafidis

Members
  • Posts

    21
  • Joined

  • Last visited

Posts posted by giafidis

  1. Hello everyone,

    I'm encountering an issue with passthrough of the Radeon Vega56 GPU (Sapphire Pulse Vega56) and its audio counterpart in a Windows 10/11 virtual machine. While the GPU passthrough works fine, the audio card associated with the GPU is not recognized by Windows. I have also tested this setup with a Linux Manjaro VM, where the sound is functioning without any problems.

     

    Here are some details regarding my configuration:

     

    • unRAID version: 6.12.2
    • Mainboard: Gigabyte X99-UD4
    • CPU: E5-2699V3
    • GPU IOMMU group: 30 [1002:687f] 04:00.0 VGA compatible controller: Advanced Micro Devices, Inc. [AMD/ATI] Vega 10 XL/XT [Radeon RX Vega 56/64] (rev c3)
    • Audio IOMMU group: 3 [1002:aaf8] 04:00.1 Audio device: Advanced Micro Devices, Inc. [AMD/ATI] Vega 10 HDMI Audio [Radeon Vega 56/64]

     

    Steps I have taken so far to resolve the issue:

    1. Tried both Windows 10 and Windows 11 VMs, but encountered the same results.
    2. Configured the VM with the Q35-7.1 machine type.
    3. Attempted to bind the GPU and audio device, but it makes no difference.
    4. Modified the XML template for the VM, enabling the multifunction option as suggested by others.
    5. Tested both PCIe ACS override options in the VM Manager Settings.
    6. Tried both Legacy and UEFI boot options in unRAID.
    7. Additionally, I have installed the AMD Vendor Reset Plugin from ich777 to address any potential problems from the AMD Reset Bug.

     

    Despite these efforts, I have not been able to get the GPU audio device recognized by Windows in the VM. It's worth noting that the GPU is installed in the second slot on my mainboard, and I have tested with and without the VBios ROM without any noticeable difference.


    My VM XML for Reference:

    <?xml version='1.0' encoding='UTF-8'?>
    <!-- libvirt domain definition for the Windows 10 VM with Vega 56 passthrough (quoted from the post) -->
    <domain type='kvm'>
      <name>Windows 10 AMD</name>
      <uuid>da61cd5d-ecab-d060-bab5-b98140e093db</uuid>
      <metadata>
        <vmtemplate xmlns="unraid" name="Windows 10" icon="windows.png" os="windows10"/>
      </metadata>
      <memory unit='KiB'>8388608</memory>
      <currentMemory unit='KiB'>8388608</currentMemory>
      <memoryBacking>
        <nosharepages/>
      </memoryBacking>
      <vcpu placement='static'>15</vcpu>
      <!-- 15 vCPUs pinned to specific host CPUs -->
      <cputune>
        <vcpupin vcpu='0' cpuset='10'/>
        <vcpupin vcpu='1' cpuset='28'/>
        <vcpupin vcpu='2' cpuset='11'/>
        <vcpupin vcpu='3' cpuset='29'/>
        <vcpupin vcpu='4' cpuset='12'/>
        <vcpupin vcpu='5' cpuset='30'/>
        <vcpupin vcpu='6' cpuset='13'/>
        <vcpupin vcpu='7' cpuset='14'/>
        <vcpupin vcpu='8' cpuset='32'/>
        <vcpupin vcpu='9' cpuset='15'/>
        <vcpupin vcpu='10' cpuset='33'/>
        <vcpupin vcpu='11' cpuset='16'/>
        <vcpupin vcpu='12' cpuset='34'/>
        <vcpupin vcpu='13' cpuset='17'/>
        <vcpupin vcpu='14' cpuset='35'/>
      </cputune>
      <resource>
        <partition>/machine</partition>
      </resource>
      <!-- UEFI (OVMF) firmware with the Q35 7.1 machine type mentioned in the post -->
      <os>
        <type arch='x86_64' machine='pc-q35-7.1'>hvm</type>
        <loader readonly='yes' type='pflash'>/usr/share/qemu/ovmf-x64/OVMF_CODE-pure-efi.fd</loader>
        <nvram>/etc/libvirt/qemu/nvram/da61cd5d-ecab-d060-bab5-b98140e093db_VARS-pure-efi.fd</nvram>
      </os>
      <features>
        <acpi/>
        <apic/>
        <!-- Hyper-V enlightenments enabled for the Windows guest; custom vendor_id value 'none' -->
        <hyperv mode='custom'>
          <relaxed state='on'/>
          <vapic state='on'/>
          <spinlocks state='on' retries='8191'/>
          <vendor_id state='on' value='none'/>
        </hyperv>
      </features>
      <cpu mode='host-passthrough' check='none' migratable='on'>
        <topology sockets='1' dies='1' cores='15' threads='1'/>
        <cache mode='passthrough'/>
      </cpu>
      <clock offset='localtime'>
        <timer name='hypervclock' present='yes'/>
        <timer name='hpet' present='no'/>
      </clock>
      <on_poweroff>destroy</on_poweroff>
      <on_reboot>restart</on_reboot>
      <on_crash>restart</on_crash>
      <devices>
        <emulator>/usr/local/sbin/qemu</emulator>
        <disk type='file' device='disk'>
          <driver name='qemu' type='raw' cache='writeback'/>
          <source file='/mnt/user/domains/Windows 10 AMD/vdisk1.img'/>
          <target dev='hdc' bus='virtio'/>
          <serial>vdisk1</serial>
          <boot order='1'/>
          <address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
        </disk>
        <disk type='block' device='disk'>
          <driver name='qemu' type='raw' cache='writeback'/>
          <source dev='/dev/sdj'/>
          <target dev='hdd' bus='virtio'/>
          <serial>vdisk2</serial>
          <address type='pci' domain='0x0000' bus='0x04' slot='0x00' function='0x0'/>
        </disk>
        <disk type='file' device='cdrom'>
          <driver name='qemu' type='raw'/>
          <source file='/mnt/user/isos/cyg-en-us_windows_10_enterprise_ltsc_2021_x64_dvd_d289cf96.iso'/>
          <target dev='hda' bus='sata'/>
          <readonly/>
          <boot order='2'/>
          <address type='drive' controller='0' bus='0' target='0' unit='0'/>
        </disk>
        <disk type='file' device='cdrom'>
          <driver name='qemu' type='raw'/>
          <source file='/mnt/user/isos/virtio-win-0.1.229-1.iso'/>
          <target dev='hdb' bus='sata'/>
          <readonly/>
          <address type='drive' controller='0' bus='0' target='0' unit='1'/>
        </disk>
        <controller type='usb' index='0' model='qemu-xhci' ports='15'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
        </controller>
        <!-- Q35 PCIe topology: pcie-root plus six root ports (guest buses 1-6) -->
        <controller type='pci' index='0' model='pcie-root'/>
        <controller type='pci' index='1' model='pcie-root-port'>
          <model name='pcie-root-port'/>
          <target chassis='1' port='0x10'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0' multifunction='on'/>
        </controller>
        <controller type='pci' index='2' model='pcie-root-port'>
          <model name='pcie-root-port'/>
          <target chassis='2' port='0x11'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x1'/>
        </controller>
        <controller type='pci' index='3' model='pcie-root-port'>
          <model name='pcie-root-port'/>
          <target chassis='3' port='0x12'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x2'/>
        </controller>
        <controller type='pci' index='4' model='pcie-root-port'>
          <model name='pcie-root-port'/>
          <target chassis='4' port='0x13'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x3'/>
        </controller>
        <controller type='pci' index='5' model='pcie-root-port'>
          <model name='pcie-root-port'/>
          <target chassis='5' port='0x14'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x4'/>
        </controller>
        <controller type='pci' index='6' model='pcie-root-port'>
          <model name='pcie-root-port'/>
          <target chassis='6' port='0x8'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
        </controller>
        <controller type='virtio-serial' index='0'>
          <address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
        </controller>
        <controller type='sata' index='0'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
        </controller>
        <interface type='bridge'>
          <mac address='52:54:00:be:4a:ce'/>
          <source bridge='br0'/>
          <model type='virtio-net'/>
          <address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
        </interface>
        <serial type='pty'>
          <target type='isa-serial' port='0'>
            <model name='isa-serial'/>
          </target>
        </serial>
        <console type='pty'>
          <target type='serial' port='0'/>
        </console>
        <channel type='unix'>
          <target type='virtio' name='org.qemu.guest_agent.0'/>
          <address type='virtio-serial' controller='0' bus='0' port='1'/>
        </channel>
        <input type='tablet' bus='usb'>
          <address type='usb' bus='0' port='1'/>
        </input>
        <input type='mouse' bus='ps2'/>
        <input type='keyboard' bus='ps2'/>
        <!-- no emulated audio backend; guest audio is expected to come from the passed-through HDMI audio function below -->
        <audio id='1' type='none'/>
        <!-- VFIO passthrough: host 04:00.0 (the Vega 56 GPU per the post); multifunction='on'
             so the audio device can attach as function 0x1 of the same guest slot -->
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x04' slot='0x00' function='0x0'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0' multifunction='on'/>
        </hostdev>
        <!-- VFIO passthrough: host 04:00.1 (the GPU's HDMI audio device per the post),
             placed at function 0x1 of the same guest bus/slot as the GPU -->
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x04' slot='0x00' function='0x1'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x1'/>
        </hostdev>
        <memballoon model='none'/>
      </devices>
      <seclabel type='dynamic' model='dac' relabel='yes'/>
    </domain>

     

    I would appreciate any insights or suggestions you may have. Thank you in advance for your help!


    Cheers,

    giafidis

  2. On 5/14/2021 at 1:30 PM, HyperV said:

    I had exactly the same issue and could not find any solutions on the forum or the internet. So I did some digging myself and found the cause of the issue. The docker update check script gets the remote digest of the latest tag from the docker repository via a header called 'Docker-Content-Digest'. The script checks for this header with a case-sensitive regex pattern. Manually querying the docker hub registry gives me a header called 'docker-content-digest' (mind the casing). The docker hub registry must have recently changed the casing of this header, because it broke for me in the last 24 hours. I'm running on Unraid 6.8.3 still, so I'm not 100% sure if this issue also exists in 6.9.x.

    If you feel up to it, you could quite easily fix this yourself until there is a real fix. I'll describe the steps below:

    Open file: /usr/local/emhttp/plugins/dynamix.docker.manager/include/DockerClient.php

    Go to line 457.

    There you should look for the text:

    
    @Docker-Content-Digest:\s*(.*)@

    and replace it with:

    
    @Docker-Content-Digest:\s*(.*)@i


    Save the file.

    This will make the header check case-insensitive and should make it work again.

    You are a Lifesaver! Was looking for a Solution for days!

  3. @DoeBoye I just saw the Announcement and was thinking to try it out, but i switched back to 6.8.3 because i tried some things in the meantime and got super frustrated:

    I checked my System Log and saw some repeated Error Messages: pcieport 0000:00:03.0: [ 6] Bad TLP, PCIe Bus Error: severity=Corrected, type=Data Link Layer, id=0010(Transmitter ID). They are registering after some activity from the PCIe Bus. After researching a bit, it turned out that x99 Chipsets are known for this Issue, but i couldn't remember it being present on 6.8.3. With Kernel Parameter "pcie_aspm=off" in the Flash Boot menu, you disable the Active State Power Management for PCIe on boot and the Errors don't occur anymore. I was convinced that this was causing the Passthrough Issues, but after some testing, nothing changed...

  4. 12 hours ago, DoeBoye said:

    For the record, I have the same problem passing through a Nvidia GT 710. I tried the script above in user scripts at boot. No change. I tried binding the GT 710 to the vfio-pci driver at boot using the new system devices checkbox feature, but no luck... Very odd. Worked flawlessly in 6.8.3

    What Motherboard do you have?

    I also tried the Binding Feature on all my GPUs (Nvidia and AMD) and it doesn't change anything.

     

    @Celsian

    Thanks for your support, Mate!

     

    I already dumped it with the Script. It didn't make any difference...

    Side note: The script doesn't work for me in 6.9. I get always an error message that i should bind the GPU with vfio-pci first and then try again. I tried that out, but no luck. In 6.8.3 however, the script works without any Binding at all...

     

  5. 4 hours ago, fearlessknight said:

    Script location: /tmp/user.scripts/tmpScripts/GPUPass/script
    Note that closing this window will abort the execution of this script
    /tmp/user.scripts/tmpScripts/GPUPass/script: line 3: /sys/class/vtconsole/vtcon1/bind: No such file or directory
    /tmp/user.scripts/tmpScripts/GPUPass/script: line 4: echo: write error: No such device

     

    Same as yours. What do you normally use to remote into your VM? Maybe try a fresh VM install and run the script.

    I use TightVNC, for VMs that are using VNC and GPU Passthrough.

     

    I just finished installing a fresh Win10 VM. I used VNC for the Installation and switched afterwards to my 1050 Ti. After Driver Installation, the VM locks up and reboots. I don't know what else to try out...

     

    This is the Log of the new VM. It looks normal to me:

    
    ErrorWarningSystemArrayLogin
    
    -smp 4,sockets=1,dies=1,cores=2,threads=2 \
    -uuid e8002a45-a8da-7e91-2076-7fbd4f5077de \
    -display none \
    -no-user-config \
    -nodefaults \
    -chardev socket,id=charmonitor,fd=31,server,nowait \
    -mon chardev=charmonitor,id=monitor,mode=control \
    -rtc base=localtime \
    -no-hpet \
    -no-shutdown \
    -boot strict=on \
    -device ich9-usb-ehci1,id=usb,bus=pci.0,addr=0x7.0x7 \
    -device ich9-usb-uhci1,masterbus=usb.0,firstport=0,bus=pci.0,multifunction=on,addr=0x7 \
    -device ich9-usb-uhci2,masterbus=usb.0,firstport=2,bus=pci.0,addr=0x7.0x1 \
    -device ich9-usb-uhci3,masterbus=usb.0,firstport=4,bus=pci.0,addr=0x7.0x2 \
    -device ahci,id=sata0,bus=pci.0,addr=0x3 \
    -device virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x4 \
    -blockdev '{"driver":"file","filename":"/mnt/user/isos/en_windows_10_enterprise_ltsc_2019_x64_dvd_74865958.iso","node-name":"libvirt-3-storage","auto-read-only":true,"discard":"unmap"}' \
    -blockdev '{"node-name":"libvirt-3-format","read-only":true,"driver":"raw","file":"libvirt-3-storage"}' \
    -device ide-cd,bus=sata0.0,drive=libvirt-3-format,id=sata0-0-0,bootindex=2 \
    -blockdev '{"driver":"file","filename":"/mnt/user/isos/virtio-win-0.1.190-1.iso","node-name":"libvirt-2-storage","auto-read-only":true,"discard":"unmap"}' \
    -blockdev '{"node-name":"libvirt-2-format","read-only":true,"driver":"raw","file":"libvirt-2-storage"}' \
    -device ide-cd,bus=sata0.1,drive=libvirt-2-format,id=sata0-0-1 \
    -blockdev '{"driver":"file","filename":"/mnt/disks/SanDisk_SD7SB2Q-512G-1006_152978401993/Windows 10/vdisk1.img","node-name":"libvirt-1-storage","cache":{"direct":false,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
    -blockdev '{"node-name":"libvirt-1-format","read-only":false,"cache":{"direct":false,"no-flush":false},"driver":"raw","file":"libvirt-1-storage"}' \
    -device ide-hd,bus=sata0.2,drive=libvirt-1-format,id=sata0-0-2,bootindex=1,write-cache=on \
    -netdev tap,fd=33,id=hostnet0 \
    -device virtio-net,netdev=hostnet0,id=net0,mac=xx:xx:xx:xx:xx:xx,bus=pci.0,addr=0x2 \
    -chardev pty,id=charserial0 \
    -device isa-serial,chardev=charserial0,id=serial0 \
    -chardev socket,id=charchannel0,fd=34,server,nowait \
    -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \
    -device usb-tablet,id=input0,bus=usb.0,port=1 \
    -device 'vfio-pci,host=0000:02:00.0,id=hostdev0,bus=pci.0,addr=0x5,romfile=/mnt/user/My Stuff/Dokumente/VGA BIOS/Gigabyte GeForce GTX 1050 Ti 4GB/1050ti _owndump.rom' \
    -device vfio-pci,host=0000:02:00.1,id=hostdev1,bus=pci.0,addr=0x6 \
    -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
    -msg timestamp=on
    2021-03-07 20:19:06.395+0000: Domain id=1 is tainted: high-privileges
    2021-03-07 20:19:06.395+0000: Domain id=1 is tainted: host-cpu
    char device redirected to /dev/pts/0 (label charserial0)

     

  6. Upgraded back to 6.9 and tried it out, but unfortunately it doesn't work for me and I'm not sure if i did something wrong. As suggested, i created the User Script and set it to start before the Array does. I'm getting this Output:

     

    Script location: /tmp/user.scripts/tmpScripts/GPU Passthrough Fix 6.9/script
    Note that closing this window will abort the execution of this script
    /tmp/user.scripts/tmpScripts/GPU Passthrough Fix 6.9/script: line 3: /sys/class/vtconsole/vtcon1/bind: No such file or directory
    /tmp/user.scripts/tmpScripts/GPU Passthrough Fix 6.9/script: line 4: echo: write error: No such device

     

    Starting the VM, has the same effects as before (Blackscreen and Bootloop).

  7. 9 minutes ago, fearlessknight said:

    Just tried this and was able to get the GPU to passthrough successfully along with the Error 43. I'm going to pass this along to another thread with this info and see if they can't fix this during the next patch. I will mention you as well. Do you have any issues running software while hyperV is off? Or have you moved back to 6.8.3? 

    I already switched back, because my Plex Server runs under a VM with GPU Passthrough.

     

    I didn't test it in detail, but what i tested, ran without problems. It is more performance bound i believe.

     

    If they ask for any Logs, i made a copy while i was on 6.9. Hopefully it gets fixed.

     

     

  8. 17 hours ago, fearlessknight said:

    Good afternoon everyone,

     

    I also have come across this issue were I was previously on 6.9.0 RC2 (Non-working) and upgraded to the stable 6.9 (non-working) in an attempt to get my GPU passthrough to work (2 GPUs\2VMs). After countless attempts of trial and error. I may consider downgrading to 6.8.3 until a fix is put in place. 

    If anyone requires any screenshots or info, I'd be happy to provide.

     

    Thanks,

    Glad to see that I'm not the only one with this Problem. What is your Hardware Configuration?

     

    My Server Specs:

    Motherboard: Gigabyte Technology Co., Ltd. - X99-UD4-CF

    Processor: Intel® Xeon® CPU E5-2640 v3 @ 2.60GHz

    Memory: 128 GB DDR4

    GPU1: Radeon HD 6450

    GPU2: GeForce GTX 1050 Ti

    GPU3: GeForce GT 1030

  9. Hello Folks,

     

    I went ahead and upgraded my unRAID Server yesterday, from 6.8.3 to 6.9. I bypassed every 6.9 Beta and RC Release and waited for the Stable to come out. The Update went very smoothly, but on first boot i noticed one of my Win10 VMs, where a GTX 1050 Ti is passed through, trying to start but reverting to an endless Bootloop and CPU Usage peaking at 100%. Then i tried to use VNC instead and the Machine booted without any Problems. I suspected that maybe the new QEMU Version might not be compatible and installed a fresh new VM with the same Card passing through. Same problem there. I started researching and found out that disabling Hyper-V in the Template solves the Problem. I tried that out and Windows booted with the Nvidia Driver showing Error 43. Uninstalling the Drivers with DDU and reinstalling them didn't make any difference.

     

    Next, I tried to Bind the GPU under Tools->System Devices, switch unRAID from UEFI boot to Legacy Boot, even tried a different GPU (GT 1030) and its always the same. Bootloop with Hyper-V enabled and Error 43 when disabled.

     

    Tried downgrading to 6.8.3, but then all my Docker and VM Configuration was gone, not knowing how to recover them without doing all over again.

     

    Any Help is really appreciated.

     

    My VM XML Config (as it worked with 6.8.3):

     

    <?xml version='1.0' encoding='UTF-8'?>
    <!-- libvirt domain definition quoted from the post as the config that worked under unRAID 6.8.3 -->
    <domain type='kvm'>
      <name>DiGi-Server</name>
      <uuid>2f2c3e6c-de2b-dfb5-53b3-ad81a3d6531d</uuid>
      <metadata>
        <vmtemplate xmlns="unraid" name="Windows Server 2016" icon="windows.png" os="windows2016"/>
      </metadata>
      <memory unit='KiB'>16777216</memory>
      <currentMemory unit='KiB'>16777216</currentMemory>
      <memoryBacking>
        <nosharepages/>
      </memoryBacking>
      <vcpu placement='static'>4</vcpu>
      <!-- 4 vCPUs pinned to specific host CPUs -->
      <cputune>
        <vcpupin vcpu='0' cpuset='4'/>
        <vcpupin vcpu='1' cpuset='12'/>
        <vcpupin vcpu='2' cpuset='5'/>
        <vcpupin vcpu='3' cpuset='13'/>
      </cputune>
      <!-- UEFI (OVMF) firmware on the legacy i440fx machine type (pc-i440fx-4.2) -->
      <os>
        <type arch='x86_64' machine='pc-i440fx-4.2'>hvm</type>
        <loader readonly='yes' type='pflash'>/usr/share/qemu/ovmf-x64/OVMF_CODE-pure-efi.fd</loader>
        <nvram>/etc/libvirt/qemu/nvram/2f2c3e6c-de2b-dfb5-53b3-ad81a3d6531d_VARS-pure-efi.fd</nvram>
      </os>
      <features>
        <acpi/>
        <apic/>
        <!-- Hyper-V enlightenments enabled; custom vendor_id value 'none' -->
        <hyperv>
          <relaxed state='on'/>
          <vapic state='on'/>
          <spinlocks state='on' retries='8191'/>
          <vendor_id state='on' value='none'/>
        </hyperv>
      </features>
      <cpu mode='host-passthrough' check='none' migratable='on'>
        <topology sockets='1' dies='1' cores='2' threads='2'/>
        <cache mode='passthrough'/>
      </cpu>
      <clock offset='localtime'>
        <timer name='hypervclock' present='yes'/>
        <timer name='hpet' present='no'/>
      </clock>
      <on_poweroff>destroy</on_poweroff>
      <on_reboot>restart</on_reboot>
      <on_crash>restart</on_crash>
      <devices>
        <emulator>/usr/local/sbin/qemu</emulator>
        <disk type='file' device='cdrom'>
          <driver name='qemu' type='raw'/>
          <source file='/mnt/user/isos/virtio-win-0.1.190-1.iso'/>
          <target dev='hdb' bus='sata'/>
          <readonly/>
          <address type='drive' controller='0' bus='0' target='0' unit='1'/>
        </disk>
        <disk type='file' device='disk'>
          <driver name='qemu' type='raw' cache='writeback'/>
          <source file='/mnt/disks/SanDisk_SD7SB2Q-512G-1006_152978401993/DiGi-Server/vdisk1.img'/>
          <target dev='hdc' bus='sata'/>
          <boot order='1'/>
          <address type='drive' controller='0' bus='0' target='0' unit='2'/>
        </disk>
        <controller type='sata' index='0'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
        </controller>
        <controller type='virtio-serial' index='0'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
        </controller>
        <controller type='pci' index='0' model='pci-root'/>
        <controller type='usb' index='0' model='nec-xhci' ports='15'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
        </controller>
        <interface type='bridge'>
          <mac address='52:54:00:43:94:40'/>
          <source bridge='br0'/>
          <model type='virtio'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
        </interface>
        <serial type='pty'>
          <target type='isa-serial' port='0'>
            <model name='isa-serial'/>
          </target>
        </serial>
        <console type='pty'>
          <target type='serial' port='0'/>
        </console>
        <channel type='unix'>
          <target type='virtio' name='org.qemu.guest_agent.0'/>
          <address type='virtio-serial' controller='0' bus='0' port='1'/>
        </channel>
        <input type='tablet' bus='usb'>
          <address type='usb' bus='0' port='1'/>
        </input>
        <input type='mouse' bus='ps2'/>
        <input type='keyboard' bus='ps2'/>
        <!-- VFIO passthrough: host 02:00.0 — the GTX 1050 Ti per the surrounding posts (TODO confirm) -->
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
        </hostdev>
        <!-- VFIO passthrough: host 02:00.1 — the GPU's audio function, mapped to a separate guest slot
             (no multifunction pairing here, unlike the Q35 config earlier in this thread) -->
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x02' slot='0x00' function='0x1'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
        </hostdev>
        <memballoon model='none'/>
      </devices>
    </domain>

     

     

×
×
  • Create New...