mickh18

Posts posted by mickh18

  1. On 3/21/2022 at 3:45 PM, RiDDiX said:

     

    Maybe again in English? If you want your "APU" GPU in a VM, you should at least check whether it is free to use; Unraid may already be using it.
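
    A quick way to check which driver currently owns the GPU (a sketch; the PCI address 06:00.0 is an assumption taken from the hostdev entries in the XML below, so adjust it to your system):

    # Show which kernel driver is bound to the device at 06:00.0.
    # If it reports amdgpu or i915, the host is still using the GPU and it
    # must be rebound to vfio-pci before passthrough will work.
    lspci -nnk -s 06:00.0

    Or try this: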

     

    <?xml version='1.0' encoding='UTF-8'?>
    <domain type='kvm'>
      <name>Windows 10 2</name>
      <uuid>129c1316-3854-61d1-b930-c90312e74e6e</uuid>
      <metadata>
        <vmtemplate xmlns="unraid" name="Windows 10" icon="windows.png" os="windows10"/>
      </metadata>
      <memory unit='KiB'>5242880</memory>
      <currentMemory unit='KiB'>5242880</currentMemory>
      <memoryBacking>
        <nosharepages/>
      </memoryBacking>
      <vcpu placement='static'>9</vcpu>
      <cputune>
        <vcpupin vcpu='0' cpuset='0'/>
        <vcpupin vcpu='1' cpuset='6'/>
        <vcpupin vcpu='2' cpuset='1'/>
        <vcpupin vcpu='3' cpuset='2'/>
        <vcpupin vcpu='4' cpuset='8'/>
        <vcpupin vcpu='5' cpuset='3'/>
        <vcpupin vcpu='6' cpuset='4'/>
        <vcpupin vcpu='7' cpuset='10'/>
        <vcpupin vcpu='8' cpuset='5'/>
      </cputune>
      <os>
        <type arch='x86_64' machine='pc-i440fx-5.1'>hvm</type>
        <loader readonly='yes' type='pflash'>/usr/share/qemu/ovmf-x64/OVMF_CODE-pure-efi.fd</loader>
        <nvram>/etc/libvirt/qemu/nvram/129c1316-3854-61d1-b930-c90312e74e6e_VARS-pure-efi.fd</nvram>
        <boot dev='hd'/>
      </os>
      <features>
        <acpi/>
        <apic/>
        <hyperv>
          <relaxed state='on'/>
          <vapic state='on'/>
          <spinlocks state='on' retries='8191'/>
          <vendor_id state='on' value='1234567890ab'/>
        </hyperv>
        <kvm>
          <hidden state='on'/>
        </kvm>
        <vmport state='off'/>
        <ioapic driver='kvm'/>
      </features>
      <cpu mode='host-passthrough' check='none' migratable='on'>
        <topology sockets='1' dies='1' cores='9' threads='1'/>
        <cache mode='passthrough'/>
        <feature policy='require' name='topoext'/>
      </cpu>
      <clock offset='localtime'>
        <timer name='hypervclock' present='yes'/>
        <timer name='hpet' present='no'/>
      </clock>
      <on_poweroff>destroy</on_poweroff>
      <on_reboot>restart</on_reboot>
      <on_crash>restart</on_crash>
      <devices>
        <emulator>/usr/local/sbin/qemu</emulator>
        <controller type='usb' index='0' model='ich9-ehci1'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x7'/>
        </controller>
        <controller type='usb' index='0' model='ich9-uhci1'>
          <master startport='0'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0' multifunction='on'/>
        </controller>
        <controller type='usb' index='0' model='ich9-uhci2'>
          <master startport='2'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x1'/>
        </controller>
        <controller type='usb' index='0' model='ich9-uhci3'>
          <master startport='4'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x2'/>
        </controller>
        <controller type='pci' index='0' model='pci-root'/>
        <controller type='virtio-serial' index='0'>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
        </controller>
        <interface type='bridge'>
          <mac address='52:54:00:e8:c3:fd'/>
          <source bridge='br0'/>
          <model type='virtio-net'/>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
        </interface>
        <serial type='pty'>
          <target type='isa-serial' port='0'>
            <model name='isa-serial'/>
          </target>
        </serial>
        <console type='pty'>
          <target type='serial' port='0'/>
        </console>
        <channel type='unix'>
          <target type='virtio' name='org.qemu.guest_agent.0'/>
          <address type='virtio-serial' controller='0' bus='0' port='1'/>
        </channel>
        <input type='tablet' bus='usb'>
          <address type='usb' bus='0' port='1'/>
        </input>
        <input type='mouse' bus='ps2'/>
        <input type='keyboard' bus='ps2'/>
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x06' slot='0x00' function='0x0'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
        </hostdev>
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x06' slot='0x00' function='0x1'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
        </hostdev>
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <driver name='vfio'/>
          <source>
            <address domain='0x0000' bus='0x06' slot='0x00' function='0x6'/>
          </source>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
        </hostdev>
        <memballoon model='none'/>
      </devices>
    </domain>

     

    I've just tried the below and it's not working for me. Can you offer any further assistance, please? This is driving me crazy.

     

      <features>
        <acpi/>
        <apic/>
        <hyperv mode='custom'>
          <relaxed state='on'/>
          <vapic state='on'/>
          <spinlocks state='on' retries='8191'/>
          <vendor_id state='on' value='1234567890ab'/>
        </hyperv>
        <kvm>
          <hidden state='on'/>
        </kvm>
        <vmport state='off'/>
        <ioapic driver='kvm'/>
      </features>
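
    If the modified block above is still rejected, it may simply be that the installed libvirt is too old for the mode attribute. A quick way to check (both commands ship with libvirt):

    # Re-save the definition; virsh validates the XML against the running
    # libvirt version and names the offending element on error.
    virsh edit "Windows 10 2"

    # Or validate a saved copy without touching the live definition:
    virsh dumpxml "Windows 10 2" > /tmp/win10.xml
    virt-xml-validate /tmp/win10.xml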

  2. 1 hour ago, mickh18 said:

    Hi Josh

     

    Not sure what's changed, but I've been using the staging branch perfectly fine for a few weeks, and since 18th April it's just stopped processing files, even though the watcher and "run on start" are enabled. Even though there are new files for processing, it's not adding them to the queue.
     

    I've changed to the latest build and it then found the outstanding files that require processing, but none of the workers begin to work.
     

    Can you please help?

    Ignore this; I've just read the previous post and deleted the DB. It's sprung back into life :)

  3. Hi Josh

     

    Not sure what's changed, but I've been using the staging branch perfectly fine for a few weeks, and since 18th April it's just stopped processing files, even though the watcher and "run on start" are enabled. Even though there are new files for processing, it's not adding them to the queue.
     

    I've changed to the latest build and it then found the outstanding files that require processing, but none of the workers begin to work.
     

    Can you please help?

  4. 5 hours ago, Josh.5 said:

    Sorry, just double checked. I only pushed the fix to the master branch. I've just applied it to the staging branch also. Give it 10 mins and then there should be an update for you to pull.

     

    Sorry again.

    Many thanks, Josh. I've now managed to install, but I'm still losing access to the UI as soon as it starts encoding.
     

    Log attached.

    F210E78F-A23D-483E-A737-C2A807C90B13.jpeg

  5. 21 minutes ago, Josh.5 said:

    could you also post your config

     

    Many thanks for the quick response. I'm looking in the appdata folder but nothing is visible; I've tried reinstalling again.

     

    Below is a screenshot and also the docker template output from the install; I just can't figure this out.

     

     

    Command:root@localhost:# /usr/local/emhttp/plugins/dynamix.docker.manager/scripts/docker run -d --name='unmanic' --net='host' -e TZ="Europe/London" -e HOST_OS="Unraid" -e 'TCP_PORT_8888'='8888' -e 'PUID'='99' -e 'PGID'='100' -e 'NVIDIA_VISIBLE_DEVICES'='false' -v '/mnt/user/appdata/unmanic':'/config':'rw' -v '/mnt/user/media/movies':'/library/movies':'rw' -v '/mnt/user/media/tv':'/library/tv':'rw' -v '/mnt/user/transcode/unmanic':'/tmp/unmanic':'rw' 'josh5/unmanic'

    ad7c21b71d2838341f57032be5656a733087da0847bc9155a1f99e71d3ae982c

    The command finished successfully!

     

    [two screenshots attached]
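
    One thing that stands out in the template output above (an observation, not a confirmed fix): NVIDIA_VISIBLE_DEVICES is set to 'false', which hides every GPU from the container. If NVENC transcoding is intended, the variable usually carries the GPU's UUID instead; a sketch, assuming the Unraid Nvidia driver is installed:

    # List the GPU UUIDs available on the host:
    nvidia-smi --query-gpu=uuid --format=csv,noheader

    # Then, in the container template:
    #   Extra Parameters:       --runtime=nvidia
    #   NVIDIA_VISIBLE_DEVICES: GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx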

  6. On 3/18/2021 at 4:35 PM, rmeaux said:

     

    Your symptoms sound close to mine. I was trying staging, and my RAM and CPU would max out about 15% into the first transcode. I would lose web UI response for a bit; it would come back but be stuck on the same transcode, not doing anything. I went back to 0.0.1 and have been running fine that way. Josh.5 thinks it may be related to audio transcoding; I tried disabling that while on staging but still had the same issue. I don't know anything about the backend of this, so I am at his mercy. I love the work Josh is doing on Unmanic, but I am perfectly happy on 0.0.1 for the time being. He'll get it. He's done nothing but improve it since its birth.

    Interesting. How can I downgrade to a previous version of Unmanic on Unraid to see if it resolves my issues?
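
    For reference, a hedged sketch of pinning the container to that older release (the 0.0.1 tag is taken from the post above; whether it is still published on Docker Hub is an assumption):

    # Pull the older tag directly:
    docker pull josh5/unmanic:0.0.1

    # On Unraid, the equivalent is editing the container template and
    # changing the Repository field from josh5/unmanic to josh5/unmanic:0.0.1.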

  7. Hi, I need help please. Unmanic just isn't working how it used to. I'm on the latest Unraid OS (6.9) and have tried with two NVIDIA GPUs with the same results. I also keep losing access to the web UI. It's really weird, as I've not had any problems like this before.

     

    I've cleared the data and retried installing several times to no avail; logs below.

     

        onerror(os.rmdir, path, sys.exc_info())
      File "/usr/lib/python3.6/shutil.py", line 488, in rmtree
        os.rmdir(path)
    OSError: [Errno 39] Directory not empty: '/tmp/unmanic/unmanic_file_conversion-1615972102.5668645'
    Running Unmanic from installed module
    Starting migrations
    There is nothing to migrate
    UnmanicLogger - SETUP LOGGER
    Clearing cache path - /tmp/unmanic/unmanic_file_conversion-1615972102.5668645
    Traceback (most recent call last):
      File "/usr/local/bin/unmanic", line 11, in <module>
        sys.exit(main())
      File "/usr/local/lib/python3.6/dist-packages/unmanic/service.py", line 403, in main
        service.run()
      File "/usr/local/lib/python3.6/dist-packages/unmanic/service.py", line 388, in run
        self.start_threads()
      File "/usr/local/lib/python3.6/dist-packages/unmanic/service.py", line 350, in start_threads
        common.clean_files_in_dir(settings.CACHE_PATH)
      File "/usr/local/lib/python3.6/dist-packages/unmanic/libs/common.py", line 105, in clean_files_in_dir
        shutil.rmtree(root)
      File "/usr/lib/python3.6/shutil.py", line 490, in rmtree
        onerror(os.rmdir, path, sys.exc_info())
      File "/usr/lib/python3.6/shutil.py", line 488, in rmtree
        os.rmdir(path)
    OSError: [Errno 39] Directory not empty: '/tmp/unmanic/unmanic_file_conversion-1615972102.5668645'
    [the same crash-and-restart cycle repeats verbatim]

    Unmanic.txt
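
    The loop above suggests Unmanic crashes on startup while clearing a stale cache directory. A hedged manual workaround, using the host path mapped to /tmp/unmanic in the docker template posted earlier in this thread (verify your own mapping first):

    docker stop unmanic
    # Remove the leftover conversion cache from the host side:
    rm -rf /mnt/user/transcode/unmanic/unmanic_file_conversion-*
    docker start unmanic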

  8. I have added my server to the unraid.net DNS, so my server is *****************.unraid.net, but I'm now unable to access it remotely using my own domain name.
     

    How can I remove the unraid.net DNS entry to restore external access?

     

    Thanks in advance 🙏