qemu: block: Don't query monitor in qemuBlockStorageSourceCreateDetectSize
Calling the monitor was convenient for the implementation in qemuDomainBlockCopyCommon, but causes the snapshot code to call query-named-block-nodes for every disk.

Fix this by removing the monitor call from qemuBlockStorageSourceCreateDetectSize so that the data can be reused in loops.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
ACKed-by: Eric Blake <eblake@redhat.com>
commit 2cff65e4c6
parent 86bf7ded3e
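The change turns qemuBlockStorageSourceCreateDetectSize into a plain lookup against a pre-fetched hash table. A minimal sketch of the resulting caller pattern (not code from this commit; it assumes driver, vm, priv and asyncJob are in scope as in the hunks below, and the target[] array is a hypothetical stand-in for the per-disk sources being created):

    /* One query-named-block-nodes round trip for the whole operation... */
    g_autoptr(virHashTable) blockNamedNodeData = NULL;
    size_t i;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData)
        return -1;

    /* ...then the cached data is reused for every disk; the loop body no
     * longer needs to enter the monitor. 'target' is illustrative only. */
    for (i = 0; i < vm->def->ndisks; i++) {
        if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
                                                   target[i],
                                                   vm->def->disks[i]->src) < 0)
            return -1;
    }

Before this commit, each qemuBlockStorageSourceCreateDetectSize() call entered the monitor itself, so a loop like the one above issued one query-named-block-nodes call per disk.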
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -2566,10 +2566,9 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm,
 
 /**
  * qemuBlockStorageSourceCreateDetectSize:
- * @vm: domain object
+ * @blockNamedNodeData: hash table filled with qemuBlockNamedNodeData
  * @src: storage source to update size/capacity on
  * @templ: storage source template
- * @asyncJob: qemu asynchronous job type
  *
  * When creating a storage source via blockdev-create we need to know the size
  * and capacity of the original volume (e.g. when creating a snapshot or copy).
@@ -2577,28 +2576,13 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm,
  * to the detected sizes from @templ.
  */
 int
-qemuBlockStorageSourceCreateDetectSize(virDomainObjPtr vm,
+qemuBlockStorageSourceCreateDetectSize(virHashTablePtr blockNamedNodeData,
                                        virStorageSourcePtr src,
-                                       virStorageSourcePtr templ,
-                                       qemuDomainAsyncJob asyncJob)
+                                       virStorageSourcePtr templ)
 {
-    qemuDomainObjPrivatePtr priv = vm->privateData;
-    g_autoptr(virHashTable) stats = NULL;
-    qemuBlockStatsPtr entry;
-    int rc;
-
-    if (!(stats = virHashCreate(10, virHashValueFree)))
-        return -1;
-
-    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
-        return -1;
-
-    rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats);
-
-    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
-        return -1;
+    qemuBlockNamedNodeDataPtr entry;
 
-    if (!(entry = virHashLookup(stats, templ->nodeformat))) {
+    if (!(entry = virHashLookup(blockNamedNodeData, templ->nodeformat))) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("failed to update capacity data for block node '%s'"),
                        templ->nodeformat);
--- a/src/qemu/qemu_block.h
+++ b/src/qemu/qemu_block.h
@@ -194,7 +194,6 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm,
                              qemuDomainAsyncJob asyncJob);
 
 int
-qemuBlockStorageSourceCreateDetectSize(virDomainObjPtr vm,
+qemuBlockStorageSourceCreateDetectSize(virHashTablePtr blockNamedNodeData,
                                        virStorageSourcePtr src,
-                                       virStorageSourcePtr templ,
-                                       qemuDomainAsyncJob asyncJob);
+                                       virStorageSourcePtr templ);
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -15246,6 +15246,7 @@ qemuDomainSnapshotDiskPrepareOne(virQEMUDriverPtr driver,
                                  virDomainDiskDefPtr disk,
                                  virDomainSnapshotDiskDefPtr snapdisk,
                                  qemuDomainSnapshotDiskDataPtr dd,
+                                 virHashTablePtr blockNamedNodeData,
                                  bool reuse,
                                  bool blockdev,
                                  qemuDomainAsyncJob asyncJob)
@@ -15349,8 +15350,8 @@ qemuDomainSnapshotDiskPrepareOne(virQEMUDriverPtr driver,
         if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
             return -1;
     } else {
-        if (qemuBlockStorageSourceCreateDetectSize(vm, dd->src, dd->disk->src,
-                                                   asyncJob) < 0)
+        if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
+                                                   dd->src, dd->disk->src) < 0)
             return -1;
 
         if (qemuBlockStorageSourceCreate(vm, dd->src, dd->disk->src,
@@ -15379,6 +15380,7 @@ qemuDomainSnapshotDiskPrepare(virQEMUDriverPtr driver,
                               virQEMUDriverConfigPtr cfg,
                               bool reuse,
                               bool blockdev,
+                              virHashTablePtr blockNamedNodeData,
                               qemuDomainAsyncJob asyncJob,
                               qemuDomainSnapshotDiskDataPtr *rdata,
                               size_t *rndata)
@@ -15398,7 +15400,9 @@ qemuDomainSnapshotDiskPrepare(virQEMUDriverPtr driver,
 
         if (qemuDomainSnapshotDiskPrepareOne(driver, vm, cfg, vm->def->disks[i],
                                              snapdef->disks + i,
-                                             data + ndata++, reuse, blockdev,
+                                             data + ndata++,
+                                             blockNamedNodeData,
+                                             reuse, blockdev,
                                              asyncJob) < 0)
             goto cleanup;
     }
@@ -15489,6 +15493,7 @@ qemuDomainSnapshotCreateDiskActive(virQEMUDriverPtr driver,
     qemuDomainSnapshotDiskDataPtr diskdata = NULL;
     size_t ndiskdata = 0;
     bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
+    g_autoptr(virHashTable) blockNamedNodeData = NULL;
 
     if (virDomainObjCheckActive(vm) < 0)
         return -1;
@@ -15496,10 +15501,21 @@ qemuDomainSnapshotCreateDiskActive(virQEMUDriverPtr driver,
     if (!(actions = virJSONValueNewArray()))
         return -1;
 
+    if (blockdev) {
+        if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+            return -1;
+
+        blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);
+
+        if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData)
+            return -1;
+    }
+
     /* prepare a list of objects to use in the vm definition so that we don't
      * have to roll back later */
     if (qemuDomainSnapshotDiskPrepare(driver, vm, snap, cfg, reuse, blockdev,
-                                      asyncJob, &diskdata, &ndiskdata) < 0)
+                                      blockNamedNodeData, asyncJob,
+                                      &diskdata, &ndiskdata) < 0)
         goto cleanup;
 
     /* check whether there's anything to do */
@@ -18008,6 +18024,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm,
     g_autoptr(qemuBlockStorageSourceChainData) crdata = NULL;
     virStorageSourcePtr n;
     virStorageSourcePtr mirrorBacking = NULL;
+    g_autoptr(virHashTable) blockNamedNodeData = NULL;
     int rc = 0;
 
     /* Preliminaries: find the disk we are editing, sanity checks */
@@ -18169,7 +18186,13 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm,
                                                                  priv->qemuCaps)))
             goto endjob;
     } else {
-        if (qemuBlockStorageSourceCreateDetectSize(vm, mirror, disk->src, QEMU_ASYNC_JOB_NONE) < 0)
+        qemuDomainObjEnterMonitor(driver, vm);
+        blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);
+        if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData)
+            goto endjob;
+
+        if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
+                                                   mirror, disk->src))
             goto endjob;
 
         if (mirror_shallow) {