Mirror of https://github.com/virt-manager/virt-manager.git (synced 2025-01-21 18:03:58 +03:00)
VirtualDisk: Always use storage APIs for provisioning
This means if we are passed an unmanaged path, we try to create a storage pool for the parent directory. We skip directories like /dev where doing this might be problematic. This makes things much friendlier to use for remote connections, and means we can always rely on having libvirt's storage APIs to use for format probing.
parent 67743cbe31
commit c5f6c6852a
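The commit message above describes the approach; as a rough sketch (not the virtinst code itself), the same idea can be expressed directly against the libvirt Python bindings: if a path is not already managed, define a directory pool over its parent so the file becomes a managed volume. The connection URI, the pool name "autopool", and the helper auto_manage_path() below are invented for illustration; the skip list mirrors the _can_auto_manage() helper added in this commit.

# Illustrative sketch only, assuming the python-libvirt bindings.
import os
import libvirt

_SKIP_PREFIXES = ["/dev", "/sys", "/proc"]   # don't auto-pool system directories

def _can_auto_manage(path):
    # Mirrors the helper added in this commit: skip paths under /dev, /sys, /proc
    return not any(path == p or path.startswith(p + "/") for p in _SKIP_PREFIXES)

def auto_manage_path(conn, path):
    """Return a managed volume for 'path', creating a dir pool if needed."""
    if not _can_auto_manage(path):
        return None   # e.g. /dev/sdX: pass through, let libvirt validate it

    try:
        return conn.storageVolLookupByPath(path)   # already managed
    except libvirt.libvirtError:
        pass

    parent = os.path.dirname(path)
    pool_xml = ("<pool type='dir'>"
                "<name>autopool</name>"
                "<target><path>%s</path></target>"
                "</pool>" % parent)
    pool = conn.storagePoolDefineXML(pool_xml, 0)
    pool.create(0)          # directory already exists, so no build step
    pool.setAutostart(1)
    pool.refresh(0)

    name = os.path.basename(path)
    if name in pool.listVolumes():
        return pool.storageVolLookupByName(name)
    return None   # file not there yet; a volume can now be created in the pool

# Usage sketch:
#   conn = libvirt.open("qemu+ssh://remotehost/system")
#   vol = auto_manage_path(conn, "/var/lib/libvirt/images/new.img")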
@@ -50,7 +50,7 @@
</disk>
<disk type="file" device="disk">
<driver error_policy="enospace"/>
<source file="/tmp/__virtinst_cli_new1.img"/>
<source file="/dev/default-pool/new1.img"/>
<target dev="hdb" bus="ide"/>
<readonly/>
</disk>
@@ -180,7 +180,7 @@
</disk>
<disk type="file" device="disk">
<driver error_policy="enospace"/>
<source file="/tmp/__virtinst_cli_new1.img"/>
<source file="/dev/default-pool/new1.img"/>
<target dev="hdb" bus="ide"/>
<readonly/>
</disk>
@@ -94,9 +94,12 @@ test_files = {
'CLONE_NOEXIST_XML' : "%s/clone-disk-noexist.xml" % xmldir,
'IMAGE_XML' : "%s/image.xml" % xmldir,
'IMAGE_NOGFX_XML' : "%s/image-nogfx.xml" % xmldir,
'NEWIMG1' : new_images[0],
'NEWIMG2' : new_images[1],
'NEWIMG3' : new_images[2],
'NEWIMG1' : "/dev/default-pool/new1.img",
'NEWIMG2' : "/dev/default-pool/new2.img",
'NEWCLONEIMG1' : new_images[0],
'NEWCLONEIMG2' : new_images[1],
'NEWCLONEIMG3' : new_images[2],
'AUTOMANAGEIMG' : "/some/new/pool/dir/new.img",
'EXISTIMG1' : exist_images[0],
'EXISTIMG2' : exist_images[1],
'ROIMG' : ro_img,
@@ -159,6 +162,7 @@ class Command(object):
if self.input_file:
sys.stdin = file(self.input_file)

exc = ""
try:
if app.count("virt-install"):
ret = virtinstall.main(conn=conn)
@@ -172,10 +176,13 @@ class Command(object):
ret = virtxml.main(conn=conn)
except SystemExit, sys_e:
ret = sys_e.code
except Exception:
ret = -1
exc = "\n" + "".join(traceback.format_exc())

if ret != 0:
ret = -1
outt = out.getvalue()
outt = out.getvalue() + exc
if outt.endswith("\n"):
outt = outt[:-1]
return (ret, outt)
@@ -648,6 +655,9 @@ c.add_valid("--nodisks --cdrom %(MANAGEDEXIST1)s") # Managed CDROM install
c.add_valid("--pxe --file %(MANAGEDEXIST1)s") # Using existing managed storage
c.add_valid("--pxe --disk vol=%(POOL)s/%(VOL)s") # Using existing managed storage 2
c.add_valid("--pxe --disk pool=%(POOL)s,size=.04") # Creating storage on managed pool
c.add_valid("--pxe --disk /foo/bar/baz,size=.01") # Creating any random path on the remote host
c.add_valid("--pxe --disk /dev/zde") # /dev file that we just pass through to the remote VM
c.add_invalid("--pxe --disk /foo/bar/baz") # File that doesn't exist after auto storage setup
c.add_invalid("--nodisks --location /tmp") # Use of --location
c.add_invalid("--file %(EXISTIMG1)s --pxe") # Trying to use unmanaged storage

@@ -695,6 +705,7 @@ c.add_valid("--disk pool=default,size=.00001") # Building 'default' pool
c.add_valid("--disk path=%(EXISTIMG1)s,bus=usb") # Existing USB disk
c.add_valid("--disk path=%(EXISTIMG1)s,bus=usb,removable=on") # Existing USB disk as removable
c.add_valid("--disk path=%(EXISTIMG1)s,bus=usb,removable=off") # Existing USB disk as non-removable
c.add_valid("--disk %(AUTOMANAGEIMG)s,size=.1") # autocreate the pool
c.add_invalid("--disk %(NEWIMG1)s,sparse=true,size=100000000000 --force") # Don't warn about fully allocated file exceeding disk space
c.add_invalid("--file %(NEWIMG1)s --file-size 100000 --nonsparse") # Nonexisting file, size too big
c.add_invalid("--file %(NEWIMG1)s --file-size 100000") # Huge file, sparse, but no prompting
@@ -705,7 +716,7 @@ c.add_invalid("--disk pool=foopool,size=.0001") # Specify a nonexistent pool
c.add_invalid("--disk vol=%(POOL)s/foovol") # Specify a nonexistent volume
c.add_invalid("--disk pool=%(POOL)s") # Specify a pool with no size
c.add_invalid("--disk path=%(EXISTIMG1)s,perms=ro,size=.0001,cache=FOOBAR") # Unknown cache type
c.add_invalid("--disk path=%(NEWIMG1)s,format=qcow2,size=.0000001") # Unmanaged file using non-raw format
c.add_invalid("--disk path=/dev/foo/bar/baz,format=qcow2,size=.0000001") # Unmanaged file using non-raw format
c.add_invalid("--disk path=%(MANAGEDDISKNEW1)s,format=raw,size=.0000001") # Managed disk using any format
c.add_invalid("--disk %(NEWIMG1)s") # Not specifying path= and non existent storage w/ no size
c.add_invalid("--disk %(NEWIMG1)s,sparse=true,size=100000000000") # Fail if fully allocated file would exceed disk space
@@ -919,12 +930,12 @@ c.add_invalid("-o test-for-clone --auto-clone")

c = vclon.add_category("general", "-n clonetest")
c.add_valid("-o test") # Nodisk guest
c.add_valid("-o test --file %(NEWIMG1)s --file %(NEWIMG2)s") # Nodisk, but with spurious files passed
c.add_valid("-o test --file %(NEWIMG1)s --file %(NEWIMG2)s --prompt") # Working scenario w/ prompt shouldn't ask anything
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file %(NEWIMG1)s --file %(NEWIMG2)s") # XML File with 2 disks
c.add_valid("-o test --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s") # Nodisk, but with spurious files passed
c.add_valid("-o test --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --prompt") # Working scenario w/ prompt shouldn't ask anything
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s") # XML File with 2 disks
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file virt-install --file %(EXISTIMG1)s --preserve") # XML w/ disks, overwriting existing files with --preserve
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file %(NEWIMG1)s --file %(NEWIMG2)s --file %(NEWIMG3)s --force-copy=hdc") # XML w/ disks, force copy a readonly target
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file %(NEWIMG1)s --file %(NEWIMG2)s --force-copy=fda") # XML w/ disks, force copy a target with no media
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --file %(NEWCLONEIMG3)s --force-copy=hdc") # XML w/ disks, force copy a readonly target
c.add_valid("--original-xml %(CLONE_DISK_XML)s --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --force-copy=fda") # XML w/ disks, force copy a target with no media
c.add_valid("--original-xml %(CLONE_STORAGE_XML)s --file %(MANAGEDNEW1)s") # XML w/ managed storage, specify managed path
c.add_valid("--original-xml %(CLONE_NOEXIST_XML)s --file %(EXISTIMG1)s --preserve") # XML w/ managed storage, specify managed path across pools# Libvirt test driver doesn't support cloning across pools# XML w/ non-existent storage, with --preserve
c.add_valid("-o test -n test-many-devices --replace") # Overwriting existing VM
@@ -934,7 +945,7 @@ c.add_invalid("-o idontexist --auto-clone") # Non-existent vm name with auto fl
c.add_invalid("-o test -n test") # Colliding new name
c.add_invalid("--original-xml %(CLONE_DISK_XML)s") # XML file with several disks, but non specified
c.add_invalid("--original-xml %(CLONE_DISK_XML)s --file virt-install --file %(EXISTIMG1)s") # XML w/ disks, overwriting existing files with no --preserve
c.add_invalid("--original-xml %(CLONE_DISK_XML)s --file %(NEWIMG1)s --file %(NEWIMG2)s --force-copy=hdc") # XML w/ disks, force copy but not enough disks passed
c.add_invalid("--original-xml %(CLONE_DISK_XML)s --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --force-copy=hdc") # XML w/ disks, force copy but not enough disks passed
c.add_invalid("--original-xml %(CLONE_STORAGE_XML)s --file /tmp/clonevol") # XML w/ managed storage, specify unmanaged path (should fail)
c.add_invalid("--original-xml %(CLONE_NOEXIST_XML)s --file %(EXISTIMG1)s") # XML w/ non-existent storage, WITHOUT --preserve
c.add_invalid("--original-xml %(CLONE_DISK_XML)s --file %(ROIMG)s --file %(ROIMG)s --force") # XML w/ managed storage, specify RO image without preserve
@@ -200,19 +200,3 @@ class TestClone(unittest.TestCase):
return

raise AssertionError("Expected exception, but none raised.")

def testCloneManagedToUnmanaged(self):
base = "managed-storage"

# We are trying to clone from a pool (/dev/default-pool) to unmanaged
# storage. For this case, the cloning needs to fail back to manual
# operation (no libvirt calls), but since /dev/default-pool doesn't exist,
# this should fail.
try:
self._clone_helper(base, ["/tmp/new1.img", "/tmp/new2.img"],
compare=False)

raise AssertionError("Managed to unmanaged succeeded, expected "
"failure.")
except (ValueError, RuntimeError), e:
logging.debug("Received expected exception: %s", str(e))
@@ -77,11 +77,12 @@ def openconn(uri):

if uri not in _conn_cache:
_conn_cache[uri] = {}
cache = _conn_cache[uri]
_conn_cache[uri]["vms"] = conn._fetch_all_guests_cached()
_conn_cache[uri]["pools"] = conn._fetch_all_pools_cached()
_conn_cache[uri]["vols"] = conn._fetch_all_vols_cached()
cache = _conn_cache[uri].copy()

def cb_fetch_all_guests():
if "vms" not in cache:
cache["vms"] = conn._fetch_all_guests_cached()
return cache["vms"]

def cb_fetch_all_pools():
@@ -97,6 +98,7 @@ def openconn(uri):
def cb_clear_cache(pools=False):
if pools:
cache.pop("pools", None)
cache.pop("vols", None)

conn.cb_fetch_all_guests = cb_fetch_all_guests
conn.cb_fetch_all_pools = cb_fetch_all_pools
@@ -954,25 +954,6 @@ class TestXMLConfig(unittest.TestCase):
self._testInstall(g, "winxp-kvm-stage1",
"winxp-kvm-stage3", "winxp-kvm-stage2")

def testCreateDisk(self):
"""
Doesn't really belong here, but what the hell :)
"""
path = "/tmp/__virtinst_create_test__.img"
sizegigs = .001
sizebytes = long(sizegigs * 1024L * 1024L * 1024L)

for sparse in [True, False]:
disk = VirtualDisk(utils.get_conn())
disk.path = path
disk.set_create_storage(size=sizegigs, sparse=sparse)
disk.validate()
disk.setup()

actualsize = long(os.path.getsize(path))
os.unlink(path)
self.assertEquals(sizebytes, actualsize)

def testDefaultBridge(self):
origfunc = None
util = None
virt-clone
@@ -27,7 +27,7 @@ import sys
import urlgrabber.progress as progress

import virtinst.cli as cli
from virtinst import Cloner, VirtualDisk
from virtinst import Cloner
from virtinst.cli import fail, print_stdout, print_stderr


@@ -67,29 +67,8 @@ def get_clone_macaddr(new_mac, design):
design.clone_macs = new_mac


def get_clone_uuid(new_uuid, design):
if new_uuid is not None:
design.clone_uuid = new_uuid


def _build_disk(conn, new_path, orig_path, preserve):
if not new_path:
fail(_("A disk path must be specified to clone '%s'.") % orig_path)

try:
dev = VirtualDisk(conn)
dev.path = new_path
dev.set_create_storage(size=.0001, sparse=False)
dev.validate()
except ValueError, e:
fail(_("Error with storage parameters: %s" % str(e)))

cli.validate_disk(dev, warn_overwrite=not preserve)
return dev


def get_clone_diskfile(new_diskfiles, design, preserve=False,
auto_clone=False):
def get_clone_diskfile(new_diskfiles, design, preserve, auto_clone):
if new_diskfiles is None:
new_diskfiles = [None]

@@ -100,33 +79,20 @@ def get_clone_diskfile(new_diskfiles, design, preserve=False,
# Extend the new/passed paths list with None if it's not
# long enough
new_diskfiles.append(None)
disk = new_diskfiles[newidx]
newpath = new_diskfiles[newidx]

if disk is None and auto_clone:
disk = design.generate_clone_disk_path(origpath)
if newpath is None and auto_clone:
newpath = design.generate_clone_disk_path(origpath)

if origpath is None:
devpath = None
else:
dev = _build_disk(design.conn, disk, origpath, preserve)
devpath = dev.path
newpath = None

clonepaths.append(devpath)
clonepaths.append(newpath)
newidx += 1
design.clone_paths = clonepaths


def get_clone_sparse(sparse, design):
design.clone_sparse = sparse


def get_preserve(preserve, design):
design.preserve = preserve


def get_force_target(target, design):
for i in target or []:
design.force_target = i
for disk in design.clone_disks:
cli.validate_disk(disk, warn_overwrite=not preserve)


def parse_args():
@@ -208,10 +174,12 @@ def main(conn=None):
get_clone_name(options.new_name, options.auto_clone, design)

get_clone_macaddr(options.new_mac, design)
get_clone_uuid(options.new_uuid, design)
get_clone_sparse(options.sparse, design)
get_force_target(options.target, design)
get_preserve(options.preserve, design)
if options.new_uuid is not None:
design.clone_uuid = options.new_uuid
for i in options.target or []:
design.force_target = i
design.clone_sparse = options.sparse
design.preserve = options.preserve

# This determines the devices that need to be cloned, so that
# get_clone_diskfile knows how many new disk paths it needs
@@ -136,9 +136,12 @@ class Cloner(object):
disk.path = path
disk.device = device

# We fake storage creation params for now, but we will
# update it later
disk.set_create_storage(fake=True)
if path and not self.preserve_dest_disks:
# We fake storage creation params for now, but we will
# update it later. Just use any clone_path to make sure
# validation doesn't trip up
clone_path = "/foo/bar"
disk.set_create_storage(fake=True, clone_path=clone_path)
disk.validate()
disklist.append(disk)
except Exception, e:
@@ -533,7 +536,8 @@ class Cloner(object):
raise ValueError("Disk path '%s' does not exist." %
newd.path)
except Exception, e:
logging.debug("", exc_info=True)
logging.debug("Exception creating clone disk objects",
exc_info=True)
raise ValueError(_("Could not determine original disk "
"information: %s" % str(e)))
retdisks.append(newd)
@@ -114,8 +114,8 @@ def _distill_storage(conn, do_create, nomanaged,
pass
elif path and not nomanaged:
path = os.path.abspath(path)
vol_object, pool, path_is_pool = diskbackend.check_if_path_managed(
conn, path)
(vol_object, pool, path_is_pool) = diskbackend.manage_path(conn, path)


creator = None
backend = diskbackend.StorageBackend(conn, path, vol_object,
@@ -123,13 +123,15 @@ def _distill_storage(conn, do_create, nomanaged,
if not do_create:
return backend, None

if backend.exists() and path is not None:
if vol_install:
raise ValueError("vol_install specified but %s exists." %
backend.path)
elif not clone_path:
if backend.exists(auto_check=False) and path is not None:
if not clone_path:
return backend, None

if path and not (vol_install or pool or clone_path):
raise RuntimeError(_("Don't know how to create storage for "
"path '%s'. Use libvirt APIs to manage the parent directory "
"as a pool first.") % path)

if path or vol_install or pool or clone_path:
creator = diskbackend.StorageCreator(conn, path, pool,
vol_install, clone_path,
@@ -219,13 +221,9 @@ class VirtualDisk(VirtualDevice):
return False

try:
vol = None
path_is_pool = False
try:
vol, ignore, path_is_pool = diskbackend.check_if_path_managed(
conn, path)
except:
pass
(vol, pool, path_is_pool) = diskbackend.check_if_path_managed(
conn, path)
ignore = pool

if vol or path_is_pool:
return True
@@ -629,8 +627,9 @@ class VirtualDisk(VirtualDevice):
if backing_store is not None:
backing_store = os.path.abspath(backing_store)

_validate_path(clone_path)
_validate_path(backing_store)
if not fake:
_validate_path(clone_path)
_validate_path(backing_store)

if fake and size is None:
size = .000001
@@ -692,7 +691,6 @@ class VirtualDisk(VirtualDevice):
"""
return bool(self._storage_creator)


def validate(self):
"""
function to validate all the complex interaction between the various
@@ -713,9 +711,6 @@ class VirtualDisk(VirtualDevice):
if not storage_capable:
raise ValueError(_("Connection doesn't support remote "
"storage."))
if not self.__managed_storage():
raise ValueError(_("Must specify libvirt managed storage "
"if on a remote connection"))

# The main distinctions from this point forward:
# - Are we doing storage API operations or local media checks?
@@ -104,6 +104,10 @@ def check_if_path_managed(conn, path):
pool = None
verr = str(e)

if not vol and not pool and verr:
raise ValueError(_("Cannot use storage %(path)s: %(err)s") %
{'path' : path, 'err' : verr})

if not vol:
# See if path is a pool source, and allow it through
trypool = _check_if_pool_source(conn, path)
@@ -111,26 +115,49 @@ def check_if_path_managed(conn, path):
path_is_pool = True
pool = trypool

if not vol and not pool:
if not conn.is_remote():
# Building local disk
return None, None, False

if not verr:
# Since there is no error, no pool was ever found
err = (_("Cannot use storage '%(path)s': '%(rootdir)s' is "
"not managed on the remote host.") %
{'path' : path,
'rootdir' : os.path.dirname(path)})
else:
err = (_("Cannot use storage %(path)s: %(err)s") %
{'path' : path, 'err' : verr})

raise ValueError(err)

return vol, pool, path_is_pool


def _can_auto_manage(path):
path = path or ""
skip_prefixes = ["/dev", "/sys", "/proc"]

for prefix in skip_prefixes:
if path.startswith(prefix + "/") or path == prefix:
return False
return True


def manage_path(conn, path):
"""
If path is not managed, try to create a storage pool to probe the path
"""
vol, pool, path_is_pool = check_if_path_managed(conn, path)
if vol or pool or not _can_auto_manage(path):
return vol, pool, path_is_pool

dirname = os.path.dirname(path)
poolname = StoragePool.find_free_name(
conn, os.path.basename(dirname) or "pool")
logging.debug("Attempting to build pool=%s target=%s", poolname, dirname)

poolxml = StoragePool(conn)
poolxml.name = poolxml.find_free_name(
conn, os.path.basename(dirname) or "dirpool")
poolxml.type = poolxml.TYPE_DIR
poolxml.target_path = dirname
pool = poolxml.install(build=False, create=True, autostart=True)
conn.clear_cache(pools=True)

vol = None
for checkvol in pool.listVolumes():
if checkvol == os.path.basename(path):
vol = pool.storageVolLookupByName(checkvol)
break

return vol, pool, False


def build_vol_install(conn, path, pool, size, sparse):
# Path wasn't a volume. See if base of path is a managed
# pool, and if so, setup a StorageVolume object
@@ -244,7 +271,7 @@ class StorageCreator(_StorageBase):
return self._sparse

def get_size(self):
if not self._size:
if self._size is None:
self._size = (float(self._vol_install.capacity) /
1024.0 / 1024.0 / 1024.0)
return self._size
@@ -276,19 +303,12 @@ class StorageCreator(_StorageBase):

if self.is_managed():
return self._vol_install.validate()

if devtype == "block":
raise ValueError(_("Local block device path '%s' must "
"exist.") % self.path)
if not os.access(os.path.dirname(self.path), os.R_OK):
raise ValueError("No read access to directory '%s'" %
os.path.dirname(self.path))
if self._size is None:
raise ValueError(_("size is required for non-existent disk "
"'%s'" % self.path))
if not os.access(os.path.dirname(self.path), os.W_OK):
raise ValueError(_("No write access to directory '%s'") %
os.path.dirname(self.path))

def is_size_conflict(self):
if self._vol_install:
@@ -323,63 +343,26 @@ class StorageCreator(_StorageBase):
if self.fake:
raise RuntimeError("Storage creator is fake but creation "
"requested.")

# If a clone_path is specified, but not vol_install.input_vol,
# that means we are cloning unmanaged -> managed, so skip this
if (self._vol_install and
(not self._clone_path or self._vol_install.input_vol)):
return self._vol_install.install(meter=progresscb)

if self._clone_path:
text = (_("Cloning %(srcfile)s") %
{'srcfile' : os.path.basename(self._clone_path)})
else:
text = _("Creating storage file %s") % os.path.basename(self._path)
if not self._clone_path:
raise RuntimeError("Local storage creation requested, "
"this shouldn't happen.")

size_bytes = long(self._size * 1024L * 1024L * 1024L)
text = (_("Cloning %(srcfile)s") %
{'srcfile' : os.path.basename(self._clone_path)})

size_bytes = long(self.get_size() * 1024L * 1024L * 1024L)
progresscb.start(filename=self._path, size=long(size_bytes),
text=text)

if self._clone_path:
# Plain file clone
self._clone_local(progresscb, size_bytes)
else:
# Plain file creation
self._create_local_file(progresscb, size_bytes)

def _create_local_file(self, progresscb, size_bytes):
"""
Helper function which attempts to build self.path
"""
fd = None
path = self._path
sparse = self._sparse

try:
try:
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_DSYNC)

if sparse:
os.ftruncate(fd, size_bytes)
else:
# 1 meg of nulls
mb = 1024 * 1024
buf = '\x00' * mb

left = size_bytes
while left > 0:
if left < mb:
buf = '\x00' * left
left = max(left - mb, 0)

os.write(fd, buf)
progresscb.update(size_bytes - left)
except OSError, e:
raise RuntimeError(_("Error creating diskimage %s: %s") %
(path, str(e)))
finally:
if fd is not None:
os.close(fd)
progresscb.end(size_bytes)
# Plain file clone
self._clone_local(progresscb, size_bytes)

def _clone_local(self, meter, size_bytes):
if self._clone_path == "/dev/null":
@@ -521,7 +504,7 @@ class StorageBackend(_StorageBase):
self._size = (float(ret) / 1024.0 / 1024.0 / 1024.0)
return self._size

def exists(self):
def exists(self, auto_check=True):
if self._exists is None:
if self.path is None:
self._exists = True
@@ -529,6 +512,14 @@ class StorageBackend(_StorageBase):
self._exists = True
elif not self._conn.is_remote() and os.path.exists(self._path):
self._exists = True
elif (auto_check and
self._conn.is_remote() and
not _can_auto_manage(self._path)):
# This allows users to pass /dev/sdX and we don't try to
# validate it exists on the remote connection, since
# autopooling /dev is perilous. Libvirt will error if
# the device doesn't exist.
self._exists = True
else:
self._exists = False
return self._exists
@@ -550,7 +541,7 @@ class StorageBackend(_StorageBase):
elif self._pool_object:
self._dev_type = self._get_pool_xml().get_vm_disk_type()

elif self._path:
elif self._path and not self._conn.is_remote():
if os.path.isdir(self._path):
self._dev_type = "dir"
elif util.stat_disk(self._path)[0]:
@@ -253,6 +253,15 @@ class StoragePool(_StorageObject):
return conn.storagePoolLookupByName(pool.name)
return None

@staticmethod
def find_free_name(conn, basename, **kwargs):
"""
Finds a name similar (or equal) to passed 'basename' that is not
in use by another pool. Extra params are passed to generate_name
"""
return util.generate_name(basename,
conn.storagePoolLookupByName,
**kwargs)


def __init__(self, *args, **kwargs):
@@ -502,7 +511,7 @@ class StorageVolume(_StorageObject):
def find_free_name(pool_object, basename, **kwargs):
"""
Finds a name similar (or equal) to passed 'basename' that is not
in use by another pool. Extra params are passed to generate_name
in use by another volume. Extra params are passed to generate_name
"""
pool_object.refresh(0)
return util.generate_name(basename,