#!/usr/bin/python3

# This file is part of Cockpit.
#
# Copyright (C) 2021 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.

import os
import sys
import time
import xml.etree.ElementTree as ET

# import Cockpit's machinery for test VMs and its browser test API
TEST_DIR = os.path.dirname(__file__)
sys.path.append(os.path.join(TEST_DIR, "common"))
sys.path.append(os.path.join(os.path.dirname(TEST_DIR), "bots/machine"))

from machineslib import VirtualMachinesCase  # noqa
from testlib import nondestructive, test_main, wait, Error, skipImage, timeout  # noqa
from parent import BOTS_DIR  # noqa

SYSTEM_LOCAL_CONF = '''<!DOCTYPE busconfig PUBLIC
"-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
<busconfig>
    <policy user="root">
        <allow eavesdrop="true"/>
        <allow eavesdrop="true" send_destination="*"/>
    </policy>
</busconfig>'''


def get_next_free_target(used_targets):
    i = 0
    while ("vd" + chr(97 + i) in used_targets):
        i += 1

    used_targets.append("vd" + chr(97 + i))
    return used_targets


def release_target(used_targets, target):
    used_targets.remove(target)


@nondestructive
class TestMachinesDisks(VirtualMachinesCase):

    def wait_for_disk_stats(self, name, target):
        b = self.browser
        try:
            with b.wait_timeout(10):
                b.wait_visible(f"#vm-{name}-disks-{target}-used")  # wait for disk statistics to show up
        except Error as ex:
            if not ex.msg.startswith('timeout'):
                raise
            # stats did not show up, check if user message showed up
            print("Libvirt version does not support disk statistics")
            b.wait_visible(f"#vm-{name}-disks-notification")

    def testDiskEdit(self):
        b = self.browser
        m = self.machine

        def open(target):
            b.click(f"#vm-subVmTest1-disks-{target}-edit")
            b.wait_visible(f"#vm-subVmTest1-disks-{target}-edit-dialog")

        def cancel(target):
            b.click(f"#vm-subVmTest1-disks-{target}-edit-dialog-cancel")
            b.wait_not_present(f"#vm-subVmTest1-disks-{target}-edit-dialog")

        def save(target, xfail=None):
            b.click(f"#vm-subVmTest1-disks-{target}-edit-dialog-save")
            if xfail:
                b.wait_in_text(f"#vm-subVmTest1-disks-{target}-edit-dialog .pf-c-alert", xfail)
            else:
                b.wait_not_present(f"#vm-subVmTest1-disks-{target}-edit-dialog")

        self.createVm("subVmTest1")

        # prepare libvirt storage pools
        p1 = os.path.join(self.vm_tmpdir, "vm_one")
        m.execute(f"mkdir --mode 777 {p1}")
        m.execute(f"virsh pool-create-as myPoolOne --type dir --target {p1}")
        m.execute("virsh vol-create-as myPoolOne mydisk --capacity 100M --format raw")  # raw support shareable
        m.execute("virsh vol-create-as myPoolOne mydisk2 --capacity 100M --format raw")
        m.execute("virsh vol-create-as myPoolOne mydisk3 --capacity 100M --format qcow2")
        m.execute("virsh vol-create-as myPoolOne mydisk4 --capacity 100M --format qcow2")
        wait(lambda: all(disk in m.execute("virsh vol-list myPoolOne") for disk in ["mydisk", "mydisk2", "mydisk3", "mydisk4"]))

        m.execute(f"virsh attach-disk --domain subVmTest1 --source {p1}/mydisk --target vde --targetbus virtio --persistent")
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {p1}/mydisk2 --target vdf --targetbus virtio")
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {p1}/mydisk3 --target vdg --targetbus virtio --subdriver qcow2 --persistent")

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")
        self.waitVmRow("subVmTest1")
        b.wait_in_text("#vm-subVmTest1-system-state", "Running")

        self.goToVmPage("subVmTest1")

        # Test non-persistent disks are not configurable
        b.wait_not_present("#vm-subVmTest1-disks-vdf-edit")

        # Test close button
        open("vdg")
        b.click("#vm-subVmTest1-disks-vdg-edit-dialog > button:first-child")
        b.wait_not_present("#vm-subVmTest1-disks-vdg-edit-dialog")

        # Test qcow2 disk has only readonly attribute configurable
        open("vdg")
        b.wait_visible("#vm-subVmTest1-disks-vdg-edit-readonly")
        b.wait_visible("#vm-subVmTest1-disks-vdg-edit-writable")
        b.wait_not_present("#vm-subVmTest1-disks-vdg-edit-writable-shareable")
        cancel("vdg")

        # Test configuration of readonly and shareable attributes
        open("vde")
        # Changing readonly with running VM
        b.set_checked("#vm-subVmTest1-disks-vde-edit-readonly", True)

        # Tooltip in dialog should show
        b.wait_visible("#vm-subVmTest1-disks-vde-edit-idle-message")

        # Save changes
        save("vde")
        # See tooltip present in disk listing table
        b.wait_visible("#vm-subVmTest1-disks-vde-access-tooltip")

        # Shut off VM and see state has changed
        self.performAction("subVmTest1", "forceOff")
        # Check change has been applied after shutoff
        b.wait_in_text("#vm-subVmTest1-disks-vde-access", "Read-only")
        # See tooltip no longer present in disk listing table
        b.wait_not_present("#vm-subVmTest1-disks-vde-access-tooltip")

        # Test configuration of readonly and shareable attributes for Shut off VM
        open("vde")
        # Changing readonly
        b.set_checked("#vm-subVmTest1-disks-vde-edit-writable-shareable", True)
        # Tooltip in dialog should not be present
        b.wait_not_present("#vm-subVmTest1-disks-vde-edit-idle-message")

        # Close dialog
        save("vde")
        b.wait_in_text("#vm-subVmTest1-disks-vde-access", "Writeable and shared")
        b.wait_not_present("#vm-subVmTest1-disks-vde-access-tooltip")

        b.wait_in_text("#vm-subVmTest1-disks-vde-bus", "virtio")
        b.wait_not_present("#vm-subVmTest1-disks-vde-cache")
        # Change bus type to scsi and cache mode to writeback
        open("vde")
        b.select_from_dropdown("#vm-subVmTest1-disks-vde-edit-bus-type", "scsi")
        b.select_from_dropdown("#vm-subVmTest1-disks-vde-edit-cache-mode", "writeback")

        # Close dialog
        save("vde")
        # Target has changed from vdX to sdX
        b.wait_in_text("#vm-subVmTest1-disks-sda-bus", "scsi")
        b.wait_in_text("#vm-subVmTest1-disks-sda-cache", "writeback")

        # Configure readonly for the qcow2 disk
        open("vdg")
        b.set_checked("#vm-subVmTest1-disks-vdg-edit-readonly", True)

        save("vdg")
        b.wait_not_present("#vm-subVmTest1-disks-vdg-access-tooltip")
        b.wait_in_text("#vm-subVmTest1-disks-vdg-access", "Read-only")

        if m.execute("virsh --version") >= "6.10.0":
            # Check that errors appear correctly and disappear when closing the dialog and reopening
            m.execute(f"virsh attach-disk --domain subVmTest1 --source {p1}/mydisk4 --target sdb --targetbus sata --persistent")
            b.reload()
            b.enter_page('/machines')
            open("sdb")
            b.set_checked("#vm-subVmTest1-disks-sdb-edit-readonly", True)
            save("sdb", "readonly sata disks are not supported")
            cancel("sdb")
            open("sdb")
            b.wait_not_present("#vm-subVmTest1-disks-sdb-edit-dialog .pf-c-alert")
            cancel("sdb")

        # Virtio bus type should not be shown for CDROM devices
        m.execute("touch /var/lib/libvirt/novell.iso")
        m.execute("virsh attach-disk --domain subVmTest1 --source /var/lib/libvirt/novell.iso --target hda --type cdrom --mode readonly --persistent")
        # virsh attach-disk want send an event for offline VM changes
        b.reload()
        b.enter_page('/machines')
        open("hda")
        b._wait_present("#vm-subVmTest1-disks-hda-edit-bus-type option[value=sata]")
        self.assertFalse(b.is_present("#vm-subVmTest1-disks-hda-edit-bus-type option[value=virtio]"))

        # As CDROMs are readonly access mode editing should be disabled
        b.wait_visible("#vm-subVmTest1-disks-hda-edit-readonly")
        b.wait_not_present("#vm-subVmTest1-disks-hda-edit-writable")
        b.wait_not_present("#vm-subVmTest1-disks-hda-edit-writable-shareable")
        cancel("hda")

        # Start Vm
        b.click("#vm-subVmTest1-system-run")
        b.wait_in_text("#vm-subVmTest1-system-state", "Running")

        # Test disk's bus and cache cannot be changed on running VM
        open("sda")
        b.wait_visible("#vm-subVmTest1-disks-sda-edit-bus-type:disabled")
        b.wait_visible("#vm-subVmTest1-disks-sda-edit-cache-mode:disabled")

        # Disks on non-persistent VM cannot be edited
        m.execute("virsh undefine subVmTest1")
        b.wait_not_present("#vm-subVmTest1-disks-vde-edit")

    def testDisks(self):
        b = self.browser
        m = self.machine
        prefix = "#vm-subVmTest1-disks-adddisk"

        self.createVm("subVmTest1")
        self.createVm("subVmTest2")

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")
        self.waitVmRow("subVmTest1")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")

        self.goToVmPage("subVmTest1")

        # Test basic disk properties
        b.wait_in_text("#vm-subVmTest1-disks-vda-bus", "virtio")

        b.wait_in_text("#vm-subVmTest1-disks-vda-device", "disk")

        b.wait_in_text("#vm-subVmTest1-disks-vda-source-file", "/var/lib/libvirt/images/subVmTest1-2.img")

        # Test domstats
        self.wait_for_disk_stats("subVmTest1", "vda")
        if b.is_present("#vm-subVmTest1-disks-vda-used"):
            b.wait_in_text("#vm-subVmTest1-disks-vda-used", "GiB")
            self.assertRegex(b.text("#vm-subVmTest1-disks-vda-used"), r"^(0|0\.0|0\.01) GiB$")

        # Test add disk by external action
        m.execute("qemu-img create -f raw /var/lib/libvirt/images/image3.img 128M")
        # attach to the virtio bus instead of ide
        m.execute("virsh attach-disk subVmTest1 /var/lib/libvirt/images/image3.img vdc")

        b.wait_visible("#vm-subVmTest1-disks-vda-used")

        b.wait_in_text("#vm-subVmTest1-disks-vda-bus", "virtio")

        b.wait_in_text("#vm-subVmTest1-disks-vdc-bus", "virtio")
        b.wait_in_text("#vm-subVmTest1-disks-vdc-device", "disk")
        b.wait_in_text("#vm-subVmTest1-disks-vdc-source-file", "/var/lib/libvirt/images/image3.img")

        self.wait_for_disk_stats("subVmTest1", "vdc")
        if b.is_present("#vm-subVmTest1-disks-vdc-used"):
            b.wait_in_text("#vm-subVmTest1-disks-vdc-used", "0")
            b.wait_in_text("#vm-subVmTest1-disks-vdc-capacity", "0.12")  # 128 MB

        # Check a warning message about a volume being used by another VM
        b.click(f"{prefix}")
        b.click(f"{prefix}-useexisting")
        b.select_from_dropdown("#vm-subVmTest1-disks-adddisk-existing-select-pool", "images")
        b.select_from_dropdown("#vm-subVmTest1-disks-adddisk-existing-select-volume", "subVmTest2-2.img")
        b.wait_in_text("#vm-subVmTest1-disks-adddisk-existing-select-volume-helper", "used by subVmTest2")
        b.click(".pf-c-modal-box__footer button:contains(Cancel)")

        # Test remove disk - by external action
        m.execute("virsh detach-disk subVmTest1 vdc")
        print("Restarting vm-subVmTest1, might take a while")
        self.performAction("subVmTest1", "forceReboot")

        b.wait_visible("#vm-subVmTest1-disks-vda-device")
        b.wait_not_present("#vm-subVmTest1-disks-vdc-device")

        # Check when no storage pool and adding existed disk
        # delete the default pool
        m.execute("virsh pool-destroy images; virsh pool-undefine images")
        # Open "add disk" dialog
        b.click(f"{prefix}")
        b.click(f"{prefix}-useexisting")
        # Check
        b.wait_not_present("#navbar-oops")
        b.wait_visible(f"{prefix}-existing-select-pool:disabled")
        b.wait_in_text(f"{prefix}-existing-select-pool:disabled", "No storage pools available")
        b.wait_visible(f"{prefix}-dialog-add:disabled")

    class VMInsertMediaDialog(object):
        def __init__(
            self, test_obj,
            vm_name='subVmTest1',
            mode="custom-path", target='sda',
            pool_name=None, volume_name=None,
            file_path=None,
            force=False,
        ):
            self.test_obj = test_obj
            self.mode = mode
            self.vm_name = vm_name
            self.file_path = file_path
            self.pool_name = pool_name
            self.volume_name = volume_name
            self.target = target
            self.force = force

        def _get_disks(self):
            m = self.test_obj.machine

            virsh_output = m.execute(f"virsh -c qemu:///system domblklist {self.vm_name} --details").rstrip('\n')
            lines = virsh_output.splitlines()[2:]
            disks = {}
            for line in lines:
                cells = line.split()
                # use disk's target (3rd column in table) as key
                disks[cells[2]] = {
                    'type': cells[0],
                    'device': cells[1],
                    'source': cells[3],
                }

            return disks

        def execute(self):
            self.open() \
                .fill() \
                .insert() \
                .verify()

            if not self.force:
                self.eject()
            else:
                self.force_eject()
            self.verify_eject()

        def open(self):
            b = self.test_obj.browser
            prefix = f"#vm-{self.vm_name}-disks-{self.target}-insert"
            b.click(prefix)  # button
            b.wait_in_text(".pf-c-modal-box__title", "Insert disc media")

            b.wait_visible(f"{prefix}-dialog-adddisk-custompath:checked")
            if self.mode == "use-existing":
                b.click(f"{prefix}-dialog-adddisk-useexisting")

            return self

        def fill(self):
            b = self.test_obj.browser
            if self.mode == "custom-path":
                b.wait_visible(f"#vm-{self.vm_name}-disks-{self.target}-insert-dialog-adddisk-file")
                # Type in file path
                b.set_file_autocomplete_val(f"#vm-{self.vm_name}-disks-{self.target}-insert-dialog-adddisk-file", self.file_path)
            elif self.mode == "use-existing":
                b.wait_visible(f"#vm-{self.vm_name}-disks-{self.target}-insert-dialog-adddisk-existing-select-pool:enabled")
                # Choose storage pool
                b.select_from_dropdown(f"#vm-{self.vm_name}-disks-{self.target}-insert-dialog-adddisk-existing-select-pool", self.pool_name)
                # Select from the available volumes
                b.select_from_dropdown(f"#vm-{self.vm_name}-disks-{self.target}-insert-dialog-adddisk-existing-select-volume", self.volume_name)

            return self

        def insert(self):
            b = self.test_obj.browser
            b.click(".pf-c-modal-box__footer button:contains(Insert)")

            return self

        def verify(self):
            b = self.test_obj.browser
            b.wait_not_present(".pf-c-modal-box")

            disks = self._get_disks()

            if self.mode == "custom-path":
                # verify UI shows correct media
                b.wait_in_text(f'#vm-{self.vm_name}-disks-{self.target}-source-file', self.file_path)
                # verify virsh shows correct media
                self.test_obj.assertEqual(disks[self.target]['type'], "file")
                self.test_obj.assertEqual(disks[self.target]['device'], "cdrom")
                self.test_obj.assertEqual(disks[self.target]['source'], self.file_path)
            else:
                # verify UI shows correct media
                b.wait_in_text(f'#vm-{self.vm_name}-disks-{self.target}-source-pool', self.pool_name)
                b.wait_in_text(f'#vm-{self.vm_name}-disks-{self.target}-source-volume', self.volume_name)
                # verify virsh shows correct media
                self.test_obj.assertEqual(disks[self.target]['type'], "volume")
                self.test_obj.assertEqual(disks[self.target]['device'], "cdrom")
                self.test_obj.assertEqual(disks[self.target]['source'], self.volume_name)

            return self

        def eject(self):
            b = self.test_obj.browser
            b.click(f"#vm-{self.vm_name}-disks-{self.target}-eject-button")  # button
            b.wait_in_text(".pf-c-modal-box__title", "Eject disc from VM")

            if self.mode == "custom-path":
                b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.target}-modal-description-file", self.file_path)
            elif self.mode == "use-existing":
                b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.target}-modal-description-pool dd", self.pool_name)
                b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.target}-modal-description-volume dd", self.volume_name)

            b.click(".pf-c-modal-box__footer button:contains(Eject)")
            b.wait_not_present(".pf-c-modal-box")

            return self

        def force_eject(self):
            b = self.test_obj.browser
            m = self.test_obj.machine

            b.click(f"#vm-{self.vm_name}-disks-{self.target}-eject-button")  # button
            b.wait_in_text(".pf-c-modal-box__title", "Eject disc from VM")

            # "Force eject" button is only shown if regular ejection fail
            # This might be a bit dirty, but one easy way to cause regular ejection to
            # fail is to poweroff virtual machine just before "Eject" button is clicked
            # This will return "domain is not running" failure, but that's fine, as we
            # offer "Force eject" option after any kind of failure
            m.execute(f"virsh destroy {self.vm_name}")
            b.click(".pf-c-modal-box__footer button:contains(Eject)")

            # Check "Force eject" is present and Regular eject is disabled
            b.wait_visible(".pf-c-modal-box__footer button:contains(Eject):disabled")
            b.wait_visible(".pf-c-modal-box__footer button:contains(Force eject)")

            # Start the VM again so "Force eject" will work
            m.execute(f"virsh start {self.vm_name}")
            # Watch D-BUS system calls
            # https://piware.de/2013/09/how-to-watch-system-d-bus-method-calls/
            m.write("/usr/share/dbus-1/system-local.conf", SYSTEM_LOCAL_CONF)
            # Setup dbus monitor to watch for disk ejection call
            m.spawn("dbus-monitor --system \"interface='org.libvirt.Domain',member='UpdateDevice'\" > /tmp/dbus_monitor_logs", "dbusmonitor")

            # Force eject the disc
            b.click(".pf-c-modal-box__footer button:contains(Force eject)")
            b.wait_not_present(".pf-c-modal-box")

            # Check dbus call was called with "VIR_DOMAIN_DEVICE_MODIFY_FORCE" flag
            # Flag 'uint32 7' is a logical OR of flags:
            # VIR_DOMAIN_DEVICE_MODIFY_CURRENT = 0
            # VIR_DOMAIN_DEVICE_MODIFY_LIVE = 1
            # VIR_DOMAIN_DEVICE_MODIFY_CONFIG = 2
            # VIR_DOMAIN_DEVICE_MODIFY_FORCE = 4 <- is only used with 'Force eject'
            # https://libvirt.org/html/libvirt-libvirt-domain.html
            monitor_logs = m.execute("cat /tmp/dbus_monitor_logs")
            self.test_obj.assertTrue("uint32 7" in monitor_logs)

            return self

        def verify_eject(self):
            b = self.test_obj.browser

            if self.mode == "custom-path":
                b.wait_not_present(f'#vm-{self.vm_name}-disks-{self.target}-source-file')
            else:
                b.wait_not_present(f'#vm-{self.vm_name}-disks-{self.target}-source-pool')
                b.wait_not_present(f'#vm-{self.vm_name}-disks-{self.target}-source-volume')

            disks = self._get_disks()

            # Empty cdrom should have no source
            self.test_obj.assertEqual(disks[self.target]['source'], '-')

    class VMAddDiskDialog(object):
        def __init__(
            self, test_obj, pool_name=None, volume_name=None,
            vm_name='subVmTest1',
            file_path=None, device=None,
            volume_size=10, volume_size_unit='MiB',
            mode="create-new",
            expected_target='vda', permanent=False, cache_mode=None,
            expected_format='raw',
            serial=None, expected_serial=None,
            bus_type=None, pool_type=None,
            volume_format=None, expected_volume_format=None,
            persistent_vm=True,
            expected_access=None,  # options: "Read-only", "Writeable", "Writeable and shared"
            pixel_test_tag=None,
            xfail=False, xfail_object=None,
            xfail_error_message=None, xfail_error_title=None,
            xwarning_object=None, xwarning_message=None,
            pixel_test_ignore=None,
            skip_add=False,
        ):
            self.test_obj = test_obj
            self.vm_name = vm_name
            self.file_path = file_path
            self.device = device
            self.pool_name = pool_name
            self.mode = mode
            self.volume_name = volume_name
            self.volume_size = volume_size
            self.volume_size_unit = volume_size_unit
            self.expected_target = expected_target
            self.expected_format = expected_format
            self.permanent = permanent
            self.cache_mode = cache_mode
            self.bus_type = bus_type
            self.serial = serial
            self.expected_serial = expected_serial or serial
            self.pool_type = pool_type
            self.volume_format = volume_format
            self.expected_volume_format = expected_volume_format
            self.persistent_vm = persistent_vm
            self.expected_access = expected_access
            self.skip_add = skip_add

            self.pixel_test_tag = pixel_test_tag
            self.pixel_test_ignore = pixel_test_ignore

            self.xfail = xfail
            self.xfail_object = xfail_object
            self.xfail_error_message = xfail_error_message
            self.xfail_error_title = xfail_error_title
            self.xwarning_object = xwarning_object
            self.xwarning_message = xwarning_message

        @staticmethod
        def getExpectedFormat(pool_type, expected_volume_format):
            # Guess by the name of the pool it's format to avoid passing more parameters
            if pool_type == 'iscsi':
                return 'unknown'
            elif pool_type == 'disk':
                return 'none'
            elif expected_volume_format == 'iso':
                return 'iso'
            else:
                return 'qcow2'

        def execute(self):
            self.open()
            self.fill()
            prefix = f"#vm-{self.vm_name}-disks-adddisk"

            if self.pixel_test_tag:
                self.test_obj.browser.wait_visible(f"{prefix}-dialog-add[aria-disabled=false]")
                self.test_obj.browser.assert_pixels(f"{prefix}-dialog-modal-window", self.pixel_test_tag,
                                                    ignore=[self.pixel_test_ignore] if self.pixel_test_ignore else [])

            if not self.skip_add:
                self.add_disk()
            if not self.xfail:
                self.verify_disk_added()
            else:
                if self.xfail_object:
                    self.test_obj.browser.wait_in_text(f"{prefix}-{self.xfail_object}-helper.pf-m-error", self.xfail_error_message)
                else:
                    self.test_obj.browser.wait_in_text(".pf-c-modal-box__body .pf-c-alert__title", self.xfail_error_title)
                    self.test_obj.browser.click(f"{prefix}-dialog-cancel")

        def open(self):
            b = self.test_obj.browser
            prefix = f"#vm-{self.vm_name}-disks-adddisk"
            b.click(prefix)  # button
            b.wait_in_text(".pf-c-modal-box__title", "Add disk")

            b.wait_visible(f"{prefix}-createnew:checked")
            if self.mode == "use-existing":
                b.click(f"{prefix}-useexisting")
            elif self.mode == "custom-path":
                b.click(f"{prefix}-custompath")

            return self

        def fill(self):
            b = self.test_obj.browser
            if self.mode == "create-new":
                # Choose storage pool
                if not self.pool_type or self.pool_type not in ['iscsi', 'iscsi-direct']:
                    b.wait_visible(f"#vm-{self.vm_name}-disks-adddisk-new-select-pool:enabled")
                    b.select_from_dropdown(f"#vm-{self.vm_name}-disks-adddisk-new-select-pool", self.pool_name)
                else:
                    b.click(f"#vm-{self.vm_name}-disks-adddisk-new-select-pool")
                    # Our custom select does not respond on the click function
                    b._wait_present(f".pf-c-modal-box option[value={self.pool_name}]:disabled")
                    return self

                # Insert name for the new volume
                b.set_input_text(f"#vm-{self.vm_name}-disks-adddisk-new-name", self.volume_name)
                # Insert size for the new volume
                b.set_input_text(f"#vm-{self.vm_name}-disks-adddisk-new-size", str(self.volume_size))
                b.select_from_dropdown(f"#vm-{self.vm_name}-disks-adddisk-new-unit", self.volume_size_unit)

                if self.volume_format:
                    b.select_from_dropdown(f"#vm-{self.vm_name}-disks-adddisk-new-format", self.volume_format)
                else:
                    b.wait_val(f"#vm-{self.vm_name}-disks-adddisk-new-format", self.getExpectedFormat(self.pool_type, self.expected_volume_format))
            elif self.mode == "custom-path":
                b.wait_visible(f"#vm-{self.vm_name}-disks-adddisk-file")
                # Type in file path
                b.set_file_autocomplete_val(f"#vm-{self.vm_name}-disks-adddisk-file", self.file_path)
                if self.device:
                    b.select_from_dropdown(f"#vm-{self.vm_name}-disks-adddisk-select-device", self.device)
            elif self.mode == "use-existing":
                b.wait_visible(f"#vm-{self.vm_name}-disks-adddisk-existing-select-pool:enabled")
                # Choose storage pool
                b.select_from_dropdown(f"#vm-{self.vm_name}-disks-adddisk-existing-select-pool", self.pool_name)
                # Select from the available volumes
                b.select_from_dropdown(f"#vm-{self.vm_name}-disks-adddisk-existing-select-volume", self.volume_name)

            # Configure persistency - by default the check box in unchecked for running VMs
            if self.permanent:
                b.click(f"#vm-{self.vm_name}-disks-adddisk-permanent")

            # Check non-persistent VM cannot have permanent disk attached
            if not self.persistent_vm:
                b.wait_not_present(f"#vm-{self.vm_name}-disks-adddisk-new-permanent")

            # Expand additional options
            if self.cache_mode or self.bus_type or self.serial:
                b.click("div.pf-c-modal-box button:contains(Show additional options)")
                b.wait_visible("div.pf-c-modal-box button[aria-expanded=true]:contains(Hide additional options)")

                # Configure performance options
                if self.cache_mode:
                    b.select_from_dropdown("#cache-mode", self.cache_mode)

                # Configure bus type
                if self.bus_type:
                    b.select_from_dropdown(f"div.pf-c-modal-box #vm-{self.vm_name}-disks-adddisk-bus-type", self.bus_type)

                # Configure serial number
                if self.serial:
                    if self.xwarning_object != "serial-characters":
                        b.set_input_text(f"#vm-{self.vm_name}-disks-adddisk-serial", self.serial)
                    else:
                        b.set_input_text(f"#vm-{self.vm_name}-disks-adddisk-serial", self.serial, value_check=False)
                        # unfit characters gets dynamically filtered from serial number
                        b.wait_val(f"#vm-{self.vm_name}-disks-adddisk-serial", self.expected_serial)

                    if self.xwarning_object == 'serial-characters':
                        b.wait_in_text("#serial-characters-message .pf-c-helper-text__item-text", self.xwarning_message)
                    elif self.xwarning_object == 'serial-length':
                        b.wait_in_text("#serial-length-message .pf-c-helper-text__item-text", self.xwarning_message)
                    else:
                        b.wait_not_present("#serial-length-message")
                        b.wait_not_present("#serial-characters-message")
            else:
                b.wait_not_visible("#cache-mode")
                b.wait_not_visible(f"div.pf-c-modal-box #vm-{self.vm_name}-disks-adddisk-bus-type")
                b.wait_not_visible(f"#vm-{self.vm_name}-disks-adddisk-serial")

            return self

        def add_disk(self):
            b = self.test_obj.browser
            b.click(".pf-c-modal-box__footer button:contains(Add)")

            return self

        def verify_disk_added(self):
            def _get_disk_prop(target, vm_name, prop):
                x_path = f"/domain/devices/disk[target/@dev='{target}']/{prop}"
                return m.execute(f"virsh dumpxml {self.vm_name} | xmllint --xpath \"{x_path}\" -").strip()

            b = self.test_obj.browser
            m = self.test_obj.machine
            b.wait_not_present(f"#vm-{self.vm_name}-disks-adddisk-dialog-modal-window")
            if self.device == "cdrom" or (self.file_path and self.file_path.endswith(".iso")) or self.expected_volume_format == "iso":
                expected_bus_type = self.bus_type or "scsi"
                expected_device = self.device or "cdrom"
            else:
                expected_bus_type = self.bus_type or "virtio"
                expected_device = self.device or "disk"

            b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.expected_target}-bus", expected_bus_type)
            if self.expected_access:
                b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.expected_target}-access", self.expected_access)
            b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.expected_target}-device", expected_device)

            # Check volume was added to pool's volume list
            if self.mode == "create-new":
                self.test_obj.goToMainPage()
                b.click(".pf-c-card .pf-c-card__header button:contains(Storage pool)")

                self.test_obj.waitPoolRow(self.pool_name)
                self.test_obj.togglePoolRow(self.pool_name)

                b.click(f"tr[data-row-id=pool-{self.pool_name}-system] + tr li:contains('Storage volumes') button")  # open the "Storage volumes" subtab
                b.wait_visible(f"#pool-{self.pool_name}-system-volume-{self.volume_name}-name")

                b.click(".machines-listing-breadcrumb li a:contains(Virtual machines)")
                self.test_obj.goToVmPage(self.vm_name)

            # Detect volume format
            if self.mode != "custom-path":
                volume_xml = m.execute(f"virsh vol-dumpxml {self.volume_name} {self.pool_name} ")
                detect_format_cmd = "echo \"{0}\" | xmllint --xpath '{1}' -"

                b.wait_in_text(f'#vm-{self.vm_name}-disks-{self.expected_target}-source-volume', self.volume_name)

                expected_format = self.getExpectedFormat(self.pool_type, self.expected_volume_format)
                # Unknown pool format isn't present in xml anymore
                if expected_format == "unknown" and m.execute("virsh --version") >= "5.6.0":
                    m.execute(detect_format_cmd.format(volume_xml, "/volume/target") + " | grep -qv format")
                else:
                    vol_xml = m.execute(detect_format_cmd.format(volume_xml, "/volume/target/format")).rstrip()
                    self.test_obj.assertEqual(vol_xml, f'<format type="{self.volume_format or expected_format}"/>')
            else:
                b.wait_in_text(f'#vm-{self.vm_name}-disks-{self.expected_target}-source-file', self.file_path)
                domainXML = self.test_obj.machine.execute(f"virsh dumpxml {self.vm_name}")
                root = ET.fromstring(domainXML)
                devices = root.find('devices')
                for disk in devices.findall('disk'):
                    filepath = disk.find('source').get('file')
                    if filepath == self.file_path:
                        diskformat = disk.find('driver').get('type')
                        self.test_obj.assertEqual(diskformat, self.expected_format)
                        break

            if self.cache_mode:
                b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.expected_target}-cache", self.cache_mode)

            if self.expected_serial:
                b.wait_in_text(f"#vm-{self.vm_name}-disks-{self.expected_target}-serial", self.expected_serial)

            # Check we set unmap by default
            self.test_obj.assertEqual(
                'discard="unmap"',
                _get_disk_prop(self.expected_target, self.vm_name, "driver/@discard")
            )

            return self

    @skipImage("TODO: scsi support missing on arch image", "arch")
    def testAddDiskSCSI(self):
        b = self.browser
        m = self.machine

        used_targets = ['vda']

        # Prepare an iscsi pool
        # Debian images' -cloud kernel don't have target-cli-mod kmod
        if "debian" not in m.image:
            # Preparations for testing ISCSI pools
            target_iqn = "iqn.2019-09.cockpit.lan"
            self.prepareStorageDeviceOnISCSI(target_iqn)

            m.execute(f"virsh pool-define-as iscsi-pool --type iscsi --target /dev/disk/by-id --source-host 127.0.0.1 --source-dev {target_iqn}")
            m.execute("virsh pool-start iscsi-pool")
            wait(lambda: "unit:0:0:0" in self.machine.execute("virsh pool-refresh iscsi-pool; virsh vol-list iscsi-pool"), delay=3)

            self.addCleanup(self.machine.execute, "virsh pool-destroy iscsi-pool; virsh pool-undefine iscsi-pool")

        args = self.createVm("subVmTest1")

        # Remove images pool so that we get the iscsi-pool pool first on the list
        m.execute("virsh pool-destroy images; virsh pool-undefine images")

        # Wait for the system to completely start
        wait(lambda: "login as 'cirros' user." in self.machine.execute(f"cat {args['logfile']}"), delay=3)

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        if "debian" not in m.image and "ubuntu" not in m.image:
            # ISCSI driver does not support virStorageVolCreate API
            self.VMAddDiskDialog(
                self,
                pool_name='iscsi-pool',
                pool_type='iscsi',
                xfail=True, xfail_object='new-select-pool', xfail_error_message='Pool type iscsi does not support volume creation',
            ).execute()

            self.VMAddDiskDialog(
                self,
                pool_name='iscsi-pool',
                pool_type='iscsi',
                volume_name='unit:0:0:0',
                expected_target=get_next_free_target(used_targets)[-1],
                mode='use-existing',
            ).execute()

            # Detach the iscsi disk before reaching teardown because shutting of the domains would sometimes hang when iscsi disks are attached
            m.execute("virsh detach-disk subVmTest1 --target vdb")

        # AppArmor doesn't like the non-standard path for our storage pools
        if m.image in ["debian-testing"]:
            self.allow_journal_messages(f'.* type=1400 .* apparmor="DENIED" operation="open" profile="libvirt.* name="{self.vm_tmpdir}.*')

    def testAddDiskNFS(self):
        b = self.browser
        m = self.machine

        used_targets = ['vda']

        m.execute("if selinuxenabled 2>/dev/null; then setsebool -P virt_use_nfs 1; fi")

        # Prepare a local NFS pool
        self.restore_file("/etc/exports")
        nfs_pool = os.path.join(self.vm_tmpdir, "nfs_pool")
        mnt_exports = os.path.join(self.vm_tmpdir, "mnt_exports")
        m.execute(f"mkdir {nfs_pool} {mnt_exports}")
        m.write("/etc/exports", f"{mnt_exports} 127.0.0.1/24(rw,sync,no_root_squash,no_subtree_check,fsid=0)")
        m.execute("systemctl restart nfs-server")
        m.execute(f"virsh pool-define-as nfs-pool --type netfs --target {nfs_pool} --source-host 127.0.0.1 --source-path {mnt_exports}")
        m.execute("virsh pool-start nfs-pool")
        # And create a volume on it in order to test use existing volume dialog
        m.execute("virsh vol-create-as --pool nfs-pool --name nfs-volume-0 --capacity 1M --format qcow2")

        args = self.createVm("subVmTest1")

        # Wait for the system to completely start
        wait(lambda: "login as 'cirros' user." in self.machine.execute(f"cat {args['logfile']}"), delay=3)

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        self.VMAddDiskDialog(
            self,
            pool_name='nfs-pool',
            volume_name='nfs-volume-0',
            mode='use-existing',
            volume_size=1,
            volume_size_unit='MiB',
            expected_target=get_next_free_target(used_targets)[-1],
            pixel_test_tag='vm-add-disk-modal-nfs',
            pixel_test_ignore='.pf-c-modal-box__footer',  # FIXME: The buttons size seems to change undeterministically
        ).execute()

        self.VMAddDiskDialog(
            self,
            pool_name='nfs-pool',
            volume_name='nfs-volume-1',
            volume_size=1,
            volume_size_unit='MiB',
            expected_target=get_next_free_target(used_targets)[-1],
        ).execute()

        # AppArmor doesn't like the non-standard path for our storage pools
        if m.image in ["debian-testing"]:
            self.allow_journal_messages(f'.* type=1400 .* apparmor="DENIED" operation="open" profile="libvirt.* name="{self.vm_tmpdir}.*')

    def testAddDiskPool(self):
        b = self.browser
        m = self.machine

        dev = self.add_ram_disk(2)
        used_targets = ['vda']

        self.machine.execute(f"""
            virsh pool-define-as pool-disk disk - - {dev} - {os.path.join(self.vm_tmpdir, 'poolDiskImages')}
            virsh pool-build pool-disk --overwrite
            virsh pool-start pool-disk""")

        loop_dev = self.add_loopback_disk()
        existing_disk = os.path.basename(loop_dev) + "p1"
        existing_disk2 = os.path.basename(loop_dev) + "p2"
        self.machine.execute(f"""
            virsh pool-define-as loop-disk disk - - {loop_dev} - {os.path.join(self.vm_tmpdir, 'poolLoopImages')}
            virsh pool-build loop-disk --overwrite
            virsh pool-start loop-disk
            virsh vol-create-as loop-disk {existing_disk} 1M
            virsh vol-create-as loop-disk {existing_disk2} 1M""")

        args = self.createVm("subVmTest1")

        # Wait for the system to completely start
        wait(lambda: "login as 'cirros' user." in self.machine.execute(f"cat {args['logfile']}"), delay=3)

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        partition = os.path.basename(dev) + "1"
        self.VMAddDiskDialog(
            self,
            pool_name='pool-disk',
            pool_type='disk',
            volume_name=partition,
            volume_size=1,
            volume_size_unit='MiB',
            expected_target=get_next_free_target(used_targets)[-1],
            pixel_test_tag='vm-add-disk-modal-disk-pool',
            pixel_test_ignore='.pf-c-modal-box__footer',  # FIXME: The buttons size seems to change undeterministically
        ).execute()

        self.VMAddDiskDialog(
            self,
            mode="use-existing",
            pool_name="loop-disk",
            pool_type="disk",
            volume_name=existing_disk,
            expected_target=get_next_free_target(used_targets)[-1],
        ).execute()

        # Test fix for https://bugzilla.redhat.com/show_bug.cgi?id=2107247
        m.upload([os.path.join(BOTS_DIR, "machine/cloud-init.iso")], "/var/lib/libvirt/images/defaultVol.iso")
        m.execute("virsh pool-refresh images")
        wait(lambda: "defaultVol.iso" in m.execute("virsh vol-list images"), delay=3)
        dialog = self.VMAddDiskDialog(
            self,
            mode="use-existing",
            pool_name="loop-disk",
            pool_type="disk",
            volume_name=existing_disk2,
            expected_target=get_next_free_target(used_targets)[-1],
            device="disk",
        )
        dialog.open()
        b.wait_visible("#vm-subVmTest1-disks-adddisk-existing-select-pool:enabled")
        # First select some iso file, where disk type is set to "cdrom"
        b.select_from_dropdown("#vm-subVmTest1-disks-adddisk-existing-select-pool", "images")
        b.select_from_dropdown("#vm-subVmTest1-disks-adddisk-existing-select-volume", "defaultVol.iso")
        # Then choose a different storage pool without selecting the volume (let UI automatically select the first volume from loop-disk storage pool)
        # This tests that type of disk automatically resets back to default "disk"
        b.select_from_dropdown("#vm-subVmTest1-disks-adddisk-existing-select-pool", "loop-disk")
        # Check that newly attached disk is indeed of type "disk"
        dialog.add_disk().verify_disk_added()

        # AppArmor doesn't like the non-standard path for our storage pools
        if m.image in ["debian-testing"]:
            self.allow_journal_messages(f'.* type=1400 .* apparmor="DENIED" operation="open" profile="libvirt.* name="{self.vm_tmpdir}.*')

    @timeout(900)
    def testAddDiskDirPool(self):
        b = self.browser
        m = self.machine
        prefix = "#vm-subVmTest1-disks-adddisk"

        used_targets = ['vda']
        transient_targets = []

        # prepare libvirt storage pools
        v1 = os.path.join(self.vm_tmpdir, "vm_one")
        v2 = os.path.join(self.vm_tmpdir, "vm_two")
        default_tmp = os.path.join(self.vm_tmpdir, "default_tmp")
        m.execute(f"mkdir --mode 777 {v1} {v2} {default_tmp}")
        m.execute(f"virsh pool-define-as default_tmp --type dir --target {default_tmp}; virsh pool-start default_tmp")
        m.execute(f"virsh pool-define-as myPoolOne --type dir --target {v1}; virsh pool-start myPoolOne")
        m.execute(f"virsh pool-define-as myPoolTwo --type dir --target {v2}; virsh pool-start myPoolTwo")

        m.upload([os.path.join(BOTS_DIR, "machine/cloud-init.iso")], os.path.join(default_tmp, "defaultVol.iso"))
        m.execute("virsh vol-create-as default_tmp defaultVol --capacity 10M --format raw")
        m.execute("virsh vol-create-as myPoolTwo mydiskofpooltwo_temporary --capacity 50M --format qcow2")
        m.execute("virsh vol-create-as myPoolTwo mydiskofpooltwo_permanent --capacity 50M --format qcow2")
        wait(lambda: "mydiskofpooltwo_permanent" in m.execute("virsh vol-list myPoolTwo"))

        self.createVm("subVmTest1")

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        transient_targets.append(get_next_free_target(used_targets)[-1])
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='mydiskofpoolone_temporary',
            mode='create-new',
            volume_size=10,
            volume_size_unit='MiB',
            permanent=False,
            expected_target=transient_targets[-1],
        ).execute()

        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            mode='use-existing',
        ).open()
        b.select_from_dropdown(f"{prefix}-existing-select-pool", "myPoolOne")
        # since both disks are already attached
        b.wait_attr(f"{prefix}-existing-select-volume", "disabled", "")
        b.wait_in_text(f"{prefix}-existing-select-volume", "The pool is empty")
        b.click(f"{prefix}-dialog-cancel")
        b.wait_not_present(f"{prefix}-dialog-modal-window")

        self.VMAddDiskDialog(
            self,
            pool_name='myPoolTwo',
            volume_name='mydiskofpooltwo_permanent',
            volume_size=2,
            permanent=True,
            mode='use-existing',
            expected_target=get_next_free_target(used_targets)[-1],
        ).execute()

        # check the autoselected options
        # default_tmp pool should be autoselected since it's the first in alphabetical order
        # defaultVol volume should be autoselected since it's the only volume in default_tmp pool
        transient_targets.append(get_next_free_target(used_targets)[-1])
        self.VMAddDiskDialog(
            self,
            pool_name='default_tmp',
            volume_name='defaultVol',
            mode='use-existing',
            expected_target=transient_targets[-1],
            volume_format='raw',
        ).open().add_disk().verify_disk_added()

        # shut off
        self.performAction("subVmTest1", "forceOff")

        # check if the just added non-permanent disks are gone
        for target in transient_targets:
            b.wait_not_present(f"#vm-subVmTest1-disks-{target}-device")
            release_target(used_targets, target)

        permanent_targets = [t for t in used_targets if t not in transient_targets]
        for target in permanent_targets:
            b.wait_visible(f"#vm-subVmTest1-disks-{target}-device")

        # check that bus and device type can be automatically picked up from the volume format (ISO)
        self.VMAddDiskDialog(
            self,
            expected_target='sda',
            pool_name='default_tmp',
            volume_name='defaultVol.iso',
            mode='use-existing',
            expected_volume_format='iso',
        ).execute()

        # Apparmor on debian and ubuntu may prevent access to /dev/sdb1 when starting VM,
        # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1677398
        if "debian" not in m.image and "ubuntu" not in m.image:
            # Run VM
            b.click("#vm-subVmTest1-system-run")
            b.wait_in_text("#vm-subVmTest1-system-state", "Running")
            # Test disk attachment to non-persistent VM
            m.execute("virsh undefine subVmTest1")
            self.VMAddDiskDialog(
                self,
                pool_name='myPoolOne',
                volume_name='non-peristent-vm-disk',
                permanent=False,
                persistent_vm=False,
                expected_target=get_next_free_target(used_targets)[-1],
            ).execute()

        # Undefine all Storage pools and  confirm that the Add Disk dialog is disabled
        active_pools = filter(lambda pool: pool != '', m.execute("virsh pool-list --name").split('\n'))
        for pool in active_pools:
            m.execute(f"virsh pool-destroy {pool}")
        inactive_pools = filter(lambda pool: pool != '', m.execute("virsh pool-list --inactive --name").split('\n'))
        for pool in inactive_pools:
            m.execute(f"virsh pool-undefine {pool}")
        b.wait_visible("#vm-details[data-pools-count=0]")

        b.click(prefix)  # radio button label in modal dialog
        b.wait_visible(f"{prefix}-dialog-add:disabled")
        b.click(f"{prefix}-useexisting")
        b.wait_visible(f"{prefix}-dialog-add:disabled")
        b.click(f"{prefix}-dialog-cancel")

        # Make sure that trying to inspect the Disks tab will just show the fields that are available when a pool is inactive
        b.reload()
        b.enter_page('/machines')
        b.wait_in_text("body", "Virtual machines")
        # Check that usage information can't be fetched since the pool is inactive
        b.wait_not_present("#vm-subVmTest1-disks-vdd-used")

        # AppArmor doesn't like the non-standard path for our storage pools
        if m.image in ["debian-testing"]:
            self.allow_journal_messages(f'.* type=1400 .* apparmor="DENIED" operation="open" profile="libvirt.* name="{self.vm_tmpdir}.*')

    def testAddDiskCustomPath(self):
        b = self.browser
        m = self.machine
        prefix = "#vm-subVmTest1-disks-adddisk"

        self.createVm("subVmTest1")

        # Prepare file for Custom File disk type
        m.execute("touch /var/lib/libvirt/novell.iso")
        m.execute("touch /var/lib/libvirt/empty.img")

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        # Using directory path for disk should create API failure
        self.VMAddDiskDialog(
            self,
            device='disk',
            file_path='/tmp/',
            mode='custom-path',
            xfail=True, xfail_error_title="Disk failed to be attached",
        ).execute()

        # check disk device type
        self.VMAddDiskDialog(
            self,
            device='disk',
            bus_type='scsi',
            expected_target='sda',
            expected_format='raw',
            file_path='/var/lib/libvirt/novell.iso',
            mode='custom-path',
            pixel_test_tag='vm-add-disk-modal-custom-path'
        ).execute()

        # Disk type should be detected
        m.execute("qemu-img create -f qcow2 /tmp/foobar.qcow2 1M")
        self.VMAddDiskDialog(
            self,
            device='disk',
            expected_target='vdb',
            expected_format='qcow2',
            file_path='/tmp/foobar.qcow2',
            mode='custom-path',
        ).execute()

        # Disk with backing file should shown a warning
        m.execute("qemu-img create -f qcow2 /tmp/backing.qcow2 1M")
        m.execute("qemu-img create -f qcow2 -F qcow2 -b /tmp/backing.qcow2 /tmp/base.qcow2")
        self.VMAddDiskDialog(
            self,
            device='disk',
            file_path='/tmp/base.qcow2',
            mode='custom-path',
            skip_add=True,
            xfail=True, xfail_object='file-autocomplete',
            xfail_error_message='Importing an image with a backing file is unsupported',
        ).execute()
        # Image can't be added, close dialog
        b.click(".pf-c-modal-box__footer button:contains(Cancel)")

        # non iso file
        self.VMAddDiskDialog(
            self,
            device='disk',
            bus_type='scsi',
            expected_target='sdb',
            file_path='/var/lib/libvirt/empty.img',
            mode='custom-path'
        ).execute()

        # shut off
        self.performAction("subVmTest1", "forceOff")

        # check cdrom device (cdrom can be only added to shut off VM)
        self.VMAddDiskDialog(
            self,
            device='cdrom',
            bus_type='scsi',
            expected_target='sda',
            file_path='/var/lib/libvirt/novell.iso',
            mode='custom-path'
        ).execute()

        # check that bus and device type can be automatically picked up from the *.iso extension
        # https://bugzilla.redhat.com/show_bug.cgi?id=1977810
        self.VMAddDiskDialog(
            self,
            expected_target='sda',
            file_path='/var/lib/libvirt/novell.iso',
            mode='custom-path'
        ).execute()

        # Ensure that pressing a few times arrow-down in the file selector will not cause an oops
        # https://bugzilla.redhat.com/show_bug.cgi?id=1977554
        self.VMAddDiskDialog(
            self,
            mode='custom-path'
        ).open()
        b.click(f"{prefix}-file .pf-c-select__toggle-button")
        b.wait_visible(f"{prefix}-file-autocomplete")
        b.key_press(chr(40), use_ord=True)
        b.focus(f"{prefix}-file-autocomplete li:first-child")
        for retry in range(0, 10):
            b.key_press(chr(40), use_ord=True)
            time.sleep(0.5)
        b.wait_not_present("#navbar-oops")

        # Ensure that adding disks with custom path is possible when no storage pools are present
        # https://bugzilla.redhat.com/show_bug.cgi?id=1985228
        m.execute("for pool in $(virsh pool-list --all --name); do virsh pool-destroy $pool || true; virsh pool-undefine $pool; done")
        b.wait_visible("#vm-details[data-pools-count=0]")
        b.click(prefix)
        b.set_file_autocomplete_val("#vm-subVmTest1-disks-adddisk-file", "/var/lib/libvirt/novell.iso")
        b.wait_visible(f"{prefix}-custompath:checked")
        b.wait_visible(f"{prefix}-dialog-add[aria-disabled=false]")
        b.click(f"{prefix}-createnew")
        b.wait_visible(f"{prefix}-dialog-add[aria-disabled=true]")
        b.click(f"{prefix}-useexisting")
        b.wait_visible(f"{prefix}-dialog-add[aria-disabled=true]")
        b.click(f"{prefix}-dialog-cancel")
        self.VMAddDiskDialog(
            self,
            expected_target='sda',
            file_path='/var/lib/libvirt/novell.iso',
            mode='custom-path'
        ).execute()

        # Ensure that the bus type autofilled value is according to the bus types of the existing disks of the guest,
        # even when these are not in the predefined value (see ide)
        m.execute("virt-xml subVmTest1 --edit --disk target=vda,bus=ide,path=/var/lib/libvirt/images/subVmTest1-2.img,clearxml=yes")
        b.wait_text("#vm-subVmTest1-disks-vda-bus", "ide")
        b.click(prefix)
        b.click(f"{prefix}-custompath")
        b.select_from_dropdown(f"{prefix}-select-device", "disk")
        b.click("div.pf-c-modal-box button:contains(Show additional options)")
        b.wait_visible(f"{prefix}-bus-type[data-value=ide]")

    def testAddDiskAdditionalOptions(self):
        b = self.browser
        m = self.machine

        used_targets = ['vda']
        # prepare libvirt storage pools
        v1 = os.path.join(self.vm_tmpdir, "vm_one")
        m.execute(f"mkdir --mode 777 {v1}")
        m.execute(f"virsh pool-define-as myPoolOne --type dir --target {v1}; virsh pool-start myPoolOne")
        m.execute("virsh vol-create-as myPoolOne qcowVol --capacity 10M --format qcow2")

        self.createVm("subVmTest1", running=False)

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        self.goToVmPage("subVmTest1")

        # Configure cache mode from the UI
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='writeback_cache_disk',
            mode='create-new',
            volume_size=2,
            cache_mode='writeback',
            expected_target=get_next_free_target(used_targets)[-1],
            pixel_test_tag='vm-add-disk-modal-additional-options'
        ).execute()

        # Configure scsi bus type from the UI
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='scsi_bus_disk',
            mode='create-new',
            bus_type='scsi',
            expected_target='sda',
        ).execute()

        # Configure usb bus type from the UI
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='usb_bus_disk',
            mode='create-new',
            bus_type='usb',
            expected_target='sdb',
        ).execute()

        # testing sata disk after VM shutoff because sata disk cannot be hotplugged
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='sata_bus_disk',
            mode='create-new',
            bus_type='sata',
            expected_target='sdc',
        ).execute()

        # Disk with serial number
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='disk_with_serial',
            mode='create-new',
            serial='disk_WITH-serial+.',
            expected_target='vdc',
        ).execute()

        # Disk with serial number
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='disk_with_serial_warning1',
            mode='create-new',
            serial='íňválíď;šéříáľ123',
            expected_serial='vl123',
            expected_target='vdd',
            xwarning_object="serial-characters",
            xwarning_message="Allowed characters"
        ).execute()

        # Disk with serial number
        self.VMAddDiskDialog(
            self,
            pool_name='myPoolOne',
            volume_name='disk_with_serial_warning2',
            mode='create-new',
            serial='serial_so_long_its_longer_than_20_characters',
            expected_serial='serial_so_long_its_l',
            expected_target='vde',
            xwarning_object="serial-length",
            xwarning_message="Identifier may be silently truncated to 20 characters (serial_so_long_its_l)"
        ).execute()

        m.execute("touch /var/lib/libvirt/novell.iso")
        m.execute("virsh vol-create-as myPoolOne rawVol --capacity 10M --format raw")
        # CDROM (ISO) disk can only be read-only
        self.VMAddDiskDialog(
            self,
            device='cdrom',
            bus_type='scsi',
            expected_target='sdd',
            file_path='/var/lib/libvirt/novell.iso',
            expected_access='Read-only',
            mode='custom-path'
        ).execute()

        # Raw disk should be Writeable
        self.VMAddDiskDialog(
            self,
            vm_name="subVmTest1",
            pool_name='myPoolOne',
            volume_name='rawVol',
            mode='use-existing',
            expected_target='vdf',
            expected_access="Writeable",
            volume_format='raw',
        ).execute()

        self.createVm("subVmTest2")
        self.goToMainPage()
        self.waitVmRow("subVmTest2")
        self.goToVmPage("subVmTest2")
        # Adding a raw disk to multiple VMs should make it 'shared'
        self.VMAddDiskDialog(
            self,
            vm_name="subVmTest2",
            pool_name='myPoolOne',
            volume_name='rawVol',
            mode='use-existing',
            expected_target='vdb',
            expected_access="Writeable and shared",
            volume_format='raw',
        ).execute()

        # QCOW2 disks cannot be 'shared', so even after adding it to multiple VMs, it's access should be only "writeable"
        m.execute("virsh attach-disk --domain subVmTest1 --source myPoolOne/qcowVol --target vdg --targetbus virtio --config")
        self.VMAddDiskDialog(
            self,
            vm_name="subVmTest2",
            pool_name='myPoolOne',
            volume_name='qcowVol',
            mode='use-existing',
            expected_target='vdc',
            expected_access="Writeable",
            volume_format='qcow2',
        ).execute()

    def testDetachDisk(self):
        b = self.browser
        m = self.machine

        # prepare libvirt storage pools
        p1 = os.path.join(self.vm_tmpdir, "vm_one")
        m.execute(f"mkdir --mode 777 {p1}")
        m.execute(f"virsh pool-create-as myPoolOne --type dir --target {p1}")
        m.execute("virsh vol-create-as myPoolOne mydiskofpoolone_1 --capacity 1G --format qcow2")
        m.execute("virsh vol-create-as myPoolOne mydiskofpoolone_2 --capacity 1G --format qcow2")
        m.execute("virsh vol-create-as myPoolOne mydiskofpoolone_3 --capacity 1M --format qcow2")
        wait(lambda: "mydiskofpoolone_1" in m.execute("virsh vol-list myPoolOne"))
        wait(lambda: "mydiskofpoolone_2" in m.execute("virsh vol-list myPoolOne"))
        wait(lambda: "mydiskofpoolone_3" in m.execute("virsh vol-list myPoolOne"))

        args = self.createVm("subVmTest1")

        vdc_path = f"{p1}/mydiskofpoolone_1"
        vdd_path = f"{p1}/mydiskofpoolone_2"
        vde_path = f"{p1}/mydiskofpoolone_3"
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {vdc_path} --target vdc --targetbus virtio")
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {vdd_path} --target vdd --targetbus virtio --persistent")
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {vde_path} --target vde --targetbus virtio --persistent")

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")
        self.waitVmRow("subVmTest1")

        # Wait for the login prompt before we try detaching disks - we need the OS to be fully responsive
        wait(lambda: "login as 'cirros' user" in self.machine.execute(f"cat {args['logfile']}"), delay=3)

        # Test detaching non permanent disk of a running domain
        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        b.wait_visible("#vm-subVmTest1-disks-vdc-action-kebab")
        b.click("#vm-subVmTest1-disks-vdc-action-kebab button")
        b.wait_visible("#vm-subVmTest1-disks-vdc-device")
        b.click("#delete-vm-subVmTest1-disks-vdc")
        b.wait_visible(".pf-c-modal-box")
        b.wait_in_text(".pf-c-modal-box__body .pf-c-description-list", "subVmTest1")
        b.wait_in_text("#delete-resource-modal-target", "vdc")
        b.wait_in_text("#delete-resource-modal-file", vdc_path)
        b.click(".pf-c-modal-box__footer button:contains(Remove)")
        b.wait_visible(".pf-c-modal-box__footer button.pf-m-in-progress")
        # When live-detaching disks the guest OS needs to cooperate so that we can
        # see the disk getting detached in the UI.
        # Wait until we see the login prompt before attempting operation that need
        # the OS for fully respond
        with b.wait_timeout(180):
            b.wait_not_present("#vm-subVmTest1-disks-vdc-device")
        b.wait_not_present(".pf-c-modal-box")

        # Test that detaching disk of a running domain will affect the
        # inactive configuration as well
        self.performAction("subVmTest1", "forceOff")
        b.wait_not_present("#vm-subVmTest1-disks-vdc-device")

        # Test detaching permanent disk of a stopped domain
        b.wait_visible("#vm-subVmTest1-disks-vdd-action-kebab")
        b.click("#vm-subVmTest1-disks-vdd-action-kebab button")
        b.wait_visible("#vm-subVmTest1-disks-vdd-device")
        b.click("#delete-vm-subVmTest1-disks-vdd")
        b.wait_in_text("#delete-resource-modal-target", "vdd")
        b.wait_in_text("#delete-resource-modal-file", vdd_path)
        b.wait_visible(".pf-c-modal-box")
        b.click(".pf-c-modal-box__footer button:contains(Remove)")
        b.wait_not_present("#vm-subVmTest1-disks-vdd-device")
        b.wait_not_present(".pf-c-modal-box")

        # Test detaching several disks and the deletion dialog can be closed correctly
        m.execute("virsh vol-create-as myPoolOne diskVirtio --capacity 1M --format qcow2")
        m.execute("virsh vol-create-as myPoolOne diskSata --capacity 1M --format qcow2")
        wait(lambda: "diskVirtio" in m.execute("virsh vol-list myPoolOne"))
        wait(lambda: "diskSata" in m.execute("virsh vol-list myPoolOne"))
        vdf_path = f"{p1}/diskVirtio"
        sda_path = f"{p1}/diskSata"
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {vdf_path} --target vdf --targetbus virtio --persistent")
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {sda_path} --target sda --targetbus sata --persistent")

        # Need to refresh when attaching disk for shutoff VM
        b.reload()
        b.enter_page('/machines')
        b.wait_in_text("body", "Virtual machines")
        b.wait_visible("#vm-subVmTest1-disks-sda-device")
        b.wait_visible("#vm-subVmTest1-disks-vdf-device")

        def removeDiskAndChecks(target, path):
            b.wait_visible(f"#vm-subVmTest1-disks-{target}-action-kebab")
            b.click(f"#vm-subVmTest1-disks-{target}-action-kebab button")
            b.wait_visible(f"#delete-vm-subVmTest1-disks-{target}")
            b.click(f"#delete-vm-subVmTest1-disks-{target}")
            b.wait_in_text("div[role=dialog]:contains(\"Remove disk\")", target)
            b.wait_in_text("#delete-resource-modal-target", target)
            b.wait_in_text("#delete-resource-modal-file", path)
            b.click("div[role=dialog] button:contains(Remove)")
            # The deletion dialog should be closed
            b.wait_not_present("div[role=dialog]:contains(\"Remove disk\")")
            # No error shown
            b.wait_not_present("div[aria-label=\"Danger Alert\"]")
            b.wait_not_present(f"#vm-subVmTest1-disks-{target}-device")

        removeDiskAndChecks("sda", sda_path)
        removeDiskAndChecks("vdf", vdf_path)

        # Test detaching disk of a paused domain
        m.execute(f"> {args['logfile']}")  # clear logfile
        m.execute("virsh start subVmTest1")
        # Make sure that the VM booted normally before attempting to suspend it
        wait(lambda: "login as 'cirros' user" in m.execute(f"cat {args['logfile']}"), delay=3)
        m.execute("virsh suspend subVmTest1")
        b.wait_in_text("#vm-subVmTest1-system-state", "Paused")
        b.wait_visible("#vm-subVmTest1-disks-vde-action-kebab")
        b.click("#vm-subVmTest1-disks-vde-action-kebab button")
        b.wait_visible("#delete-vm-subVmTest1-disks-vde a[aria-disabled=true]")
        m.execute("virsh resume subVmTest1")
        wait(lambda: "login as 'cirros' user." in self.machine.execute(f"cat {args['logfile']}"), delay=3)

        # Test detaching of disk on non-persistent VM
        m.execute("virsh undefine subVmTest1")
        m.execute(f"virsh attach-disk --domain subVmTest1 --source {p1}/mydiskofpoolone_1 --target vdc --targetbus virtio")
        b.wait_visible("#vm-subVmTest1-disks-vdc-action-kebab")
        b.click("#vm-subVmTest1-disks-vdc-action-kebab button")
        b.wait_visible("#vm-subVmTest1-disks-vdc-device")
        b.click("#delete-vm-subVmTest1-disks-vdc")
        b.wait_visible(".pf-c-modal-box")
        b.click(".pf-c-modal-box__footer button:contains(Remove)")
        b.wait_not_present("#vm-subVmTest1-disks-vdc-device")
        b.wait_not_present(".pf-c-modal-box")

    def testInsertDiscCDROM(self):
        b = self.browser
        m = self.machine

        # Prepare storage pool with an ISO file in it to be used with "Use existing" option
        m.execute("virsh pool-define-as images --type dir --target /var/lib/libvirt/images; virsh pool-start images")
        m.upload([os.path.join(BOTS_DIR, "machine/cloud-init.iso")], "/var/lib/libvirt/images/cd.iso")

        # Prepare ISO file to be used with "Custom path" option
        m.execute("touch /var/lib/libvirt/novell.iso")

        self.createVm("subVmTest1", running=False)
        # Add empty CDROM device to the VM
        m.execute("virt-xml subVmTest1 --add-device --disk target.dev=sda,device=cdrom")
        m.execute("virsh start subVmTest1")

        self.login_and_go("/machines")
        b.wait_in_text("body", "Virtual machines")

        b.wait_in_text("#vm-subVmTest1-system-state", "Running")
        self.goToVmPage("subVmTest1")

        self.VMInsertMediaDialog(
            self,
            mode='custom-path',
            target='sda',
            file_path='/var/lib/libvirt/novell.iso',
        ).execute()

        self.VMInsertMediaDialog(
            self,
            mode='use-existing',
            target='sda',
            pool_name='images',
            volume_name='cd.iso',
        ).execute()

        # On Ubuntu and Debian ejecting CDROM disc after rebooting VM will lead to the failure:
        # "unable to execute QEMU command 'blockdev-remove-medium': Tray of device 'ide0-1-0' is not open"
        # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1930398
        if "debian" not in m.image and "ubuntu" not in m.image:
            self.VMInsertMediaDialog(
                self,
                mode='custom-path',
                target='sda',
                file_path='/var/lib/libvirt/novell.iso',
                force=True,
            ).execute()


if __name__ == '__main__':
    test_main()
