Mirror of https://github.com/virt-manager/virt-manager.git
objects: Erase the notion of connkey
Make it explicit that all uses of this are actually the object name. We already leaked this abstraction in several places, so better to make it explicit. This also communicates to users that this field is not immutable, so it shouldn't be used as a unique key.

Signed-off-by: Cole Robinson <crobinso@redhat.com>
commit e45a2228dc
parent 8ff0da7664
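The rationale, as a minimal self-contained sketch (the classes below are hypothetical stand-ins, not virt-manager code): an object's name is mutable, so it is safe to look an object up by name on demand, but unsafe to store the name as a unique key.

# Hypothetical stand-ins illustrating the commit's point.
class Pool:
    def __init__(self, name):
        self._name = name

    def get_name(self):
        return self._name

    def define_name(self, newname):
        # Renaming invalidates anything that cached the old name as a key.
        self._name = newname


class Conn:
    def __init__(self, pools):
        self._pools = list(pools)

    def get_pool_by_name(self, name):
        # Linear scan against the *current* name, like lookup_object() in
        # the diff below.
        for pool in self._pools:
            if pool.get_name() == name:
                return pool
        return None


conn = Conn([Pool("default"), Pool("images")])

selected = conn.get_pool_by_name("default")   # hold the object, not its name
selected.define_name("default-renamed")

assert conn.get_pool_by_name("default") is None
assert conn.get_pool_by_name("default-renamed") is selected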
@@ -60,7 +60,7 @@ class TestConn(unittest.TestCase):
         # Add coverage for conn fetch_* handling, and pollhelpers
         conn = cli.getConnection("test:///default")
         objmap = {}
-        def build_cb(obj, connkey):
+        def build_cb(obj, name):
             return obj

         gone, new, master = pollhelpers.fetch_nets(conn, {}, build_cb)
@@ -1259,7 +1259,7 @@ class vmmAddHardware(vmmGObjectUI):

         if poolname:
             try:
-                pool = self.conn.get_pool(poolname)
+                pool = self.conn.get_pool_by_name(poolname)
                 self.idle_add(pool.refresh)
             except Exception: # pragma: no cover
                 log.debug("Error looking up pool=%s for refresh after "
@@ -873,7 +873,7 @@ class vmmCloneVM(vmmGObjectUI):

         for poolname in refresh_pools:
             try:
-                pool = self.conn.get_pool(poolname)
+                pool = self.conn.get_pool_by_name(poolname)
                 self.idle_add(pool.refresh)
             except Exception:
                 log.debug("Error looking up pool=%s for refresh after "
@@ -44,7 +44,7 @@ class _ObjectList(vmmGObject):
         self._objects = []

     def _blacklist_key(self, obj):
-        return str(obj.__class__) + obj.get_connkey()
+        return str(obj.__class__) + obj.get_name()

     def add_blacklist(self, obj):
         """
@@ -108,7 +108,7 @@ class _ObjectList(vmmGObject):
             # lock the whole time to prevent a 'time of check' issue
             for checkobj in self._objects:
                 if (checkobj.__class__ == obj.__class__ and
-                    checkobj.get_connkey() == obj.get_connkey()):
+                    checkobj.get_name() == obj.get_name()):
                     return False
             if obj in self._objects:
                 return False
@@ -123,13 +123,13 @@ class _ObjectList(vmmGObject):
         with self._lock:
             return [o for o in self._objects if o.__class__ is classobj]

-    def lookup_object(self, classobj, connkey):
+    def lookup_object(self, classobj, name):
         """
-        Lookup an object with the passed classobj + connkey
+        Lookup an object with the passed classobj + name
         """
         # Doesn't require locking, since get_objects_for_class covers us
         for obj in self.get_objects_for_class(classobj):
-            if obj.get_connkey() == connkey:
+            if obj.get_name() == name:
                 return obj
         return None

@@ -284,7 +284,7 @@ class vmmConnection(vmmGObject):
             name = obj.name()
             self.schedule_priority_tick(pollpool=True)
             def compare_cb():
-                return bool(self.get_pool(name))
+                return bool(self.get_pool_by_name(name))
             self._wait_for_condition(compare_cb)
         self._backend.cb_cache_new_pool = cache_new_pool

@@ -502,23 +502,23 @@ class vmmConnection(vmmGObject):
     # Libvirt object lookup methods #
     #################################

-    def get_vm(self, connkey):
-        return self._objects.lookup_object(vmmDomain, connkey)
+    def get_vm_by_name(self, name):
+        return self._objects.lookup_object(vmmDomain, name)
     def list_vms(self):
         return self._objects.get_objects_for_class(vmmDomain)

-    def get_net(self, connkey):
-        return self._objects.lookup_object(vmmNetwork, connkey)
+    def get_net_by_name(self, name):
+        return self._objects.lookup_object(vmmNetwork, name)
     def list_nets(self):
         return self._objects.get_objects_for_class(vmmNetwork)

-    def get_pool(self, connkey):
-        return self._objects.lookup_object(vmmStoragePool, connkey)
+    def get_pool_by_name(self, name):
+        return self._objects.lookup_object(vmmStoragePool, name)
     def list_pools(self):
         return self._objects.get_objects_for_class(vmmStoragePool)

-    def get_nodedev(self, connkey):
-        return self._objects.lookup_object(vmmNodeDevice, connkey)
+    def get_nodedev_by_name(self, name):
+        return self._objects.lookup_object(vmmNodeDevice, name)
     def list_nodedevs(self):
         return self._objects.get_objects_for_class(vmmNodeDevice)

@@ -557,8 +557,7 @@ class vmmConnection(vmmGObject):
     def define_pool(self, xml):
         return self._backend.storagePoolDefineXML(xml, 0)

-    def rename_object(self, obj, origxml, newxml, oldconnkey):
-        ignore = oldconnkey
+    def rename_object(self, obj, origxml, newxml):
         if obj.is_domain():
             define_cb = self.define_domain
         elif obj.is_pool():
@@ -620,7 +619,7 @@ class vmmConnection(vmmGObject):
         name = domain.name()
         log.debug("domain xmlmisc event: domain=%s event=%s args=%s",
                 name, eventstr, args)
-        obj = self.get_vm(name)
+        obj = self.get_vm_by_name(name)
         if not obj:
             return

@@ -634,7 +633,7 @@ class vmmConnection(vmmGObject):
         log.debug("domain lifecycle event: domain=%s %s", name,
                 LibvirtEnumMap.domain_lifecycle_str(state, reason))

-        obj = self.get_vm(name)
+        obj = self.get_vm_by_name(name)

         if obj:
             self.idle_add(obj.recache_from_event_loop)
@@ -649,7 +648,7 @@ class vmmConnection(vmmGObject):
         log.debug("domain agent lifecycle event: domain=%s %s", name,
                 LibvirtEnumMap.domain_agent_lifecycle_str(state, reason))

-        obj = self.get_vm(name)
+        obj = self.get_vm_by_name(name)

         if obj:
             self.idle_add(obj.recache_from_event_loop)
@@ -663,7 +662,7 @@ class vmmConnection(vmmGObject):
         name = network.name()
         log.debug("network lifecycle event: network=%s %s",
                 name, LibvirtEnumMap.network_lifecycle_str(state, reason))
-        obj = self.get_net(name)
+        obj = self.get_net_by_name(name)

         if obj:
             self.idle_add(obj.recache_from_event_loop)
@@ -679,7 +678,7 @@ class vmmConnection(vmmGObject):
         log.debug("storage pool lifecycle event: pool=%s %s",
                 name, LibvirtEnumMap.storage_lifecycle_str(state, reason))

-        obj = self.get_pool(name)
+        obj = self.get_pool_by_name(name)

         if obj:
             self.idle_add(obj.recache_from_event_loop)
@@ -693,7 +692,7 @@ class vmmConnection(vmmGObject):
         name = pool.name()
         log.debug("storage pool refresh event: pool=%s", name)

-        obj = self.get_pool(name)
+        obj = self.get_pool_by_name(name)

         if not obj:
             return
@@ -718,7 +717,7 @@ class vmmConnection(vmmGObject):
         name = dev.name()
         log.debug("node device update event: nodedev=%s", name)

-        obj = self.get_nodedev(name)
+        obj = self.get_nodedev_by_name(name)

         if obj:
             self.idle_add(obj.recache_from_event_loop)
@@ -1106,9 +1105,9 @@ class vmmConnection(vmmGObject):
             pollcb = pollhelpers.fetch_vms


-        keymap = dict((o.get_connkey(), o) for o in objs)
-        def cb(obj, key):
-            return cls(self, obj, key)
+        keymap = dict((o.get_name(), o) for o in objs)
+        def cb(obj, name):
+            return cls(self, obj, name)
         if dopoll:
             gone, new, master = pollcb(self._backend, keymap, cb)
         else:
@@ -2036,7 +2036,7 @@ class vmmCreateVM(vmmGObjectUI):
         # Kick off pool updates
         for poolname in refresh_pools:
             try:
-                pool = self.conn.get_pool(poolname)
+                pool = self.conn.get_pool_by_name(poolname)
                 self.idle_add(pool.refresh)
             except Exception: # pragma: no cover
                 log.debug("Error looking up pool=%s for refresh after "
@@ -17,7 +17,7 @@ from .xmleditor import vmmXMLEditor

 class vmmCreateVolume(vmmGObjectUI):
     __gsignals__ = {
-        "vol-created": (vmmGObjectUI.RUN_FIRST, None, [str, str]),
+        "vol-created": (vmmGObjectUI.RUN_FIRST, None, [object, object]),
     }

     def __init__(self, conn, parent_pool):
@@ -284,7 +284,8 @@ class vmmCreateVolume(vmmGObjectUI):
     ##################

     def _pool_refreshed_cb(self, pool, volname):
-        self.emit("vol-created", pool.get_connkey(), volname)
+        vol = pool.get_volume_by_name(volname)
+        self.emit("vol-created", pool, vol)

     def _finish_cb(self, error, details, vol):
         self.reset_finish_cursor()
@@ -17,18 +17,16 @@ NET_ROW_SOURCE = 1
 NET_ROW_LABEL = 2
 NET_ROW_SENSITIVE = 3
 NET_ROW_MANUAL = 4
-NET_ROW_CONNKEY = 5


 def _build_row(nettype, source_name,
-               label, is_sensitive, manual=False, connkey=None):
+               label, is_sensitive, manual=False):
     row = []
     row.insert(NET_ROW_TYPE, nettype)
     row.insert(NET_ROW_SOURCE, source_name)
     row.insert(NET_ROW_LABEL, label)
     row.insert(NET_ROW_SENSITIVE, is_sensitive)
     row.insert(NET_ROW_MANUAL, manual)
-    row.insert(NET_ROW_CONNKEY, connkey)
     return row


@@ -94,7 +92,6 @@ class vmmNetworkList(vmmGObjectUI):
         fields.insert(NET_ROW_LABEL, str)
         fields.insert(NET_ROW_SENSITIVE, bool)
         fields.insert(NET_ROW_MANUAL, bool)
-        fields.insert(NET_ROW_CONNKEY, str)

         model = Gtk.ListStore(*fields)
         combo = self.widget("net-source")
@@ -121,9 +118,8 @@ class vmmNetworkList(vmmGObjectUI):
             if net.get_xmlobj().virtualport_type == "openvswitch":
                 label += " (OpenVSwitch)"

-            rows.append(_build_row(
-                nettype, net.get_name(), label, True,
-                connkey=net.get_connkey()))
+            row = _build_row(nettype, net.get_name(), label, True)
+            rows.append(row)

         return rows

@@ -190,10 +186,7 @@ class vmmNetworkList(vmmGObjectUI):

         netobj = None
         if net.type == virtinst.DeviceInterface.TYPE_VIRTUAL:
-            for n in self.conn.list_nets():
-                if n.get_name() == devname:
-                    netobj = n
-                    break
+            netobj = self.conn.get_net_by_name(devname)

         if not netobj or netobj.is_active():
             return
@@ -96,8 +96,8 @@ class vmmHostNets(vmmGObjectUI):
         self._xmleditor.connect("xml-reset",
                 self._xmleditor_xml_reset_cb)

-        # [ unique, label, icon name, icon size, is_active ]
-        netListModel = Gtk.ListStore(str, str, str, int, bool)
+        # [ netobj, label, icon name, icon size, is_active ]
+        netListModel = Gtk.ListStore(object, str, str, int, bool)
         self.widget("net-list").set_model(netListModel)

         sel = self.widget("net-list").get_selection()
@@ -148,8 +148,7 @@ class vmmHostNets(vmmGObjectUI):
         self._set_error_page(_("Connection not active."))

     def _current_network(self):
-        connkey = uiutil.get_list_selection(self.widget("net-list"))
-        return connkey and self.conn.get_net(connkey)
+        return uiutil.get_list_selection(self.widget("net-list"))

     def _set_error_page(self, msg):
         self.widget("network-pages").set_current_page(1)
@@ -187,14 +186,13 @@ class vmmHostNets(vmmGObjectUI):
             for net in self.conn.list_nets():
                 net.disconnect_by_obj(self)
                 net.connect("state-changed", self._net_state_changed_cb)
-                model.append([net.get_connkey(), net.get_name(), "network-idle",
+                model.append([net, net.get_name(), "network-idle",
                               Gtk.IconSize.LARGE_TOOLBAR,
                               bool(net.is_active())])
         finally:
             net_list.set_model(model)

-        uiutil.set_list_selection(net_list,
-            curnet and curnet.get_connkey() or None)
+        uiutil.set_list_selection(net_list, curnet)

     def _populate_net_ipv4_state(self, net):
         (netstr, (dhcpstart, dhcpend)) = net.get_ipv4_network()
|
||||
def _net_state_changed_cb(self, net):
|
||||
# Update net state inline in the tree model
|
||||
for row in self.widget("net-list").get_model():
|
||||
if row[0] == net.get_connkey():
|
||||
if row[0] == net:
|
||||
row[4] = net.is_active()
|
||||
|
||||
# If refreshed network is the current net, refresh the UI
|
||||
curnet = self._current_network()
|
||||
if curnet and curnet.get_connkey() == net.get_connkey():
|
||||
if curnet == net:
|
||||
self._refresh_current_network()
|
||||
|
||||
def _net_selected_cb(self, selection):
|
||||
|
@@ -26,7 +26,7 @@ EDIT_POOL_XML,
 ) = list(range(3))

 VOL_NUM_COLUMNS = 7
-(VOL_COLUMN_KEY,
+(VOL_COLUMN_HANDLE,
  VOL_COLUMN_NAME,
  VOL_COLUMN_CAPACITY,
  VOL_COLUMN_SIZESTR,
@@ -35,7 +35,7 @@ VOL_NUM_COLUMNS = 7
  VOL_COLUMN_SENSITIVE) = range(VOL_NUM_COLUMNS)

 POOL_NUM_COLUMNS = 4
-(POOL_COLUMN_CONNKEY,
+(POOL_COLUMN_HANDLE,
  POOL_COLUMN_LABEL,
  POOL_COLUMN_ISACTIVE,
  POOL_COLUMN_PERCENT) = range(POOL_NUM_COLUMNS)
@@ -184,8 +184,8 @@ class vmmHostStorage(vmmGObjectUI):
         self._volmenu.add(volCopyPath)

         # Volume list
-        # [key, name, sizestr, capacity, format, in use by string, sensitive]
-        volListModel = Gtk.ListStore(str, str, str, str, str, str, bool)
+        # [obj, name, sizestr, capacity, format, in use by string, sensitive]
+        volListModel = Gtk.ListStore(object, str, str, str, str, str, bool)
         self.widget("vol-list").set_model(volListModel)

         volCol = Gtk.TreeViewColumn(_("Volumes"))
@@ -225,9 +225,9 @@ class vmmHostStorage(vmmGObjectUI):
                 Gtk.SortType.ASCENDING)

         # Init pool list
-        # [connkey, label, pool.is_active(), percent string]
+        # [pool object, label, pool.is_active(), percent string]
         pool_list = self.widget("pool-list")
-        poolListModel = Gtk.ListStore(str, str, bool, str)
+        poolListModel = Gtk.ListStore(object, str, bool, str)
         pool_list.set_model(poolListModel)

         poolCol = Gtk.TreeViewColumn(_("Storage Pools"))
@@ -281,33 +281,27 @@ class vmmHostStorage(vmmGObjectUI):
         self._set_error_page(_("Connection not active."))

     def _current_pool(self):
-        connkey = uiutil.get_list_selection(self.widget("pool-list"))
-        return connkey and self.conn.get_pool(connkey)
+        return uiutil.get_list_selection(self.widget("pool-list"))

     def _current_vol(self):
         pool = self._current_pool()
         if not pool:
             return None # pragma: no cover
-
-        connkey = uiutil.get_list_selection(self.widget("vol-list"))
-        return connkey and pool.get_volume(connkey)
+        return uiutil.get_list_selection(self.widget("vol-list"))

-    def _update_pool_row(self, connkey):
+    def _update_pool_row(self, pool):
         for row in self.widget("pool-list").get_model():
-            if row[POOL_COLUMN_CONNKEY] != connkey:
+            if row[POOL_COLUMN_HANDLE] != pool:
                 continue

             # Update active sensitivity and percent available for passed key
-            pool = self.conn.get_pool(connkey)
             row[POOL_COLUMN_ISACTIVE] = pool.is_active()
             row[POOL_COLUMN_PERCENT] = _get_pool_size_percent(pool)
             break

         curpool = self._current_pool()
-        if not curpool or curpool.get_connkey() != connkey:
-            return
-
-        self._refresh_current_pool()
+        if curpool == pool:
+            self._refresh_current_pool()

     def _populate_pool_state(self, pool):
         auto = pool.get_autostart()
@@ -394,7 +388,7 @@ class vmmHostStorage(vmmGObjectUI):
             label = "%s\n<span size='small'>%s</span>" % (name, typ)

             row = [None] * POOL_NUM_COLUMNS
-            row[POOL_COLUMN_CONNKEY] = pool.get_connkey()
+            row[POOL_COLUMN_HANDLE] = pool
             row[POOL_COLUMN_LABEL] = label
             row[POOL_COLUMN_ISACTIVE] = pool.is_active()
             row[POOL_COLUMN_PERCENT] = _get_pool_size_percent(pool)
@@ -403,8 +397,7 @@ class vmmHostStorage(vmmGObjectUI):
         finally:
             pool_list.set_model(model)

-        uiutil.set_list_selection(pool_list,
-            curpool and curpool.get_connkey() or None)
+        uiutil.set_list_selection(pool_list, curpool)

     def _populate_vols(self):
         list_widget = self.widget("vol-list")
@@ -418,8 +411,6 @@ class vmmHostStorage(vmmGObjectUI):
         vscroll_percent = vadj.get_value() // max(vadj.get_upper(), 1)

         for vol in vols:
-            key = vol.get_connkey()
-
             try:
                 path = vol.get_target_path()
                 name = vol.get_pretty_name(pool.get_type())
@@ -428,7 +419,7 @@ class vmmHostStorage(vmmGObjectUI):
                 fmt = vol.get_format() or ""
             except Exception: # pragma: no cover
                 log.debug("Error getting volume info for '%s', "
-                        "hiding it", key, exc_info=True)
+                        "hiding it", name, exc_info=True)
                 continue

             namestr = None
@@ -448,7 +439,7 @@ class vmmHostStorage(vmmGObjectUI):
             sensitive = self._vol_sensitive_cb(fmt)

             row = [None] * VOL_NUM_COLUMNS
-            row[VOL_COLUMN_KEY] = key
+            row[VOL_COLUMN_HANDLE] = vol
             row[VOL_COLUMN_NAME] = name
             row[VOL_COLUMN_SIZESTR] = sizestr
             row[VOL_COLUMN_CAPACITY] = cap
@@ -635,16 +626,14 @@ class vmmHostStorage(vmmGObjectUI):
     def _vol_list_row_activated_cb(self, src, treeiter, viewcol):
         self.emit("volume-chosen", self._current_vol())

-    def _vol_created_cb(self, src, pool_connkey, volname):
+    def _vol_created_cb(self, src, pool, vol):
         # The vol list will have already been updated, since this
         # signal arrives only after pool-refreshed. So all we do here is
         # select the vol we just created.
-        pool = self._current_pool()
-        if not pool or pool.get_connkey() != pool_connkey:
+        curpool = self._current_pool()
+        if curpool != pool:
             return # pragma: no cover

         # Select the new volume
-        uiutil.set_list_selection(self.widget("vol-list"), volname)
+        uiutil.set_list_selection(self.widget("vol-list"), vol)

     def _pool_autostart_changed_cb(self, src):
         self._enable_pool_apply(EDIT_POOL_AUTOSTART)
@@ -666,7 +655,7 @@ class vmmHostStorage(vmmGObjectUI):
         self.emit("cancel-clicked")

     def _pool_changed_cb(self, pool):
-        self._update_pool_row(pool.get_connkey())
+        self._update_pool_row(pool)

     def _conn_state_changed_cb(self, conn):
         self._refresh_conn_state()
@@ -247,7 +247,7 @@ class vmmInspection(vmmGObject):
         self._uris.append(uri)
         conn.connect("vm-added", self._vm_added_cb)
         for vm in conn.list_vms():
-            self._vm_added_cb(conn, vm.get_connkey())
+            self._vm_added_cb(conn, vm.get_name())

     def _conn_removed_cb(self, connmanager, uri):
         self._uris.remove(uri)
@@ -259,7 +259,7 @@ class vmmInspection(vmmGObject):
             log.debug("ignore libvirt/guestfs temporary VM %s", name)
             return

-        self._q.put((conn.get_uri(), vm.get_connkey()))
+        self._q.put((conn.get_uri(), vm.get_name()))

     def _start(self):
         self._thread = threading.Thread(
@@ -282,17 +282,17 @@ class vmmInspection(vmmGObject):
             if data is None:
                 log.debug("libguestfs queue vm=None, exiting thread")
                 return
-            uri, connkey = data
-            self._process_vm(uri, connkey)
+            uri, vmname = data
+            self._process_vm(uri, vmname)
             self._q.task_done()

-    def _process_vm(self, uri, connkey):
+    def _process_vm(self, uri, vmname):
         connmanager = vmmConnectionManager.get_instance()
         conn = connmanager.conns.get(uri)
         if not conn:
             return

-        vm = conn.get_vm(connkey)
+        vm = conn.get_vm_by_name(vmname)
         if not vm:
             return

@@ -344,4 +344,4 @@ class vmmInspection(vmmGObject):
         # as the data itself will be replaced once the new
         # results are available.
         self._cached_data.pop(vm.get_uuid(), None)
-        self._q.put((vm.conn.get_uri(), vm.get_connkey()))
+        self._q.put((vm.conn.get_uri(), vm.get_name()))
@@ -450,6 +450,6 @@ class vmmStatsManager(vmmGObject):
         self._latest_all_stats = self._get_all_stats(conn)

     def get_vm_statslist(self, vm):
-        if vm.get_connkey() not in self._vm_stats:
-            self._vm_stats[vm.get_connkey()] = _VMStatsList()
-        return self._vm_stats[vm.get_connkey()]
+        if vm.get_name() not in self._vm_stats:
+            self._vm_stats[vm.get_name()] = _VMStatsList()
+        return self._vm_stats[vm.get_name()]
@@ -75,7 +75,7 @@ class _IPFetcher:
         self._cache = {"qemuga": {}, "arp": {}}

         if iface.type == "network":
-            net = vm.conn.get_net(iface.source)
+            net = vm.conn.get_net_by_name(iface.source)
             if net:
                 net.get_dhcp_leases(refresh=True)

@@ -99,7 +99,7 @@ class _IPFetcher:
         arp = self._cache["arp"]
         leases = []
         if iface.type == "network":
-            net = vm.conn.get_net(iface.source)
+            net = vm.conn.get_net_by_name(iface.source)
             if net:
                 leases = net.get_dhcp_leases()

@@ -184,9 +184,6 @@ class vmmDomainSnapshot(vmmLibvirtObject):
     # Required class methods #
     ##########################

-    def _backend_get_name(self):
-        return self._backend.getName()
-
     def _conn_tick_poll_param(self):
         return None # pragma: no cover
     def class_name(self):
@@ -18,12 +18,12 @@ class vmmLibvirtObject(vmmGObject):
     _STATUS_ACTIVE = 1
     _STATUS_INACTIVE = 2

-    def __init__(self, conn, backend, key, parseclass):
+    def __init__(self, conn, backend, name, parseclass):
         vmmGObject.__init__(self)
         self._conn = conn
         self._backend = backend
-        self._key = key
         self._parseclass = parseclass
+        self._name = name

         self.__initialized = False
         self.__status = None
@@ -36,11 +36,6 @@ class vmmLibvirtObject(vmmGObject):
         self._inactive_xml_flags = 0
         self._active_xml_flags = 0

-        # Cache object name. We may need to do this even
-        # before init_libvirt_state since it might be needed ahead of time.
-        self._name = None
-        self.get_name()
-
     @staticmethod
     def log_redefine_xml_diff(obj, origxml, newxml):
         if origxml == newxml:
@@ -94,8 +89,6 @@ class vmmLibvirtObject(vmmGObject):

     def get_backend(self):
         return self._backend
-    def get_connkey(self):
-        return self._key

     def is_domain(self):
         return self.class_name() == "domain"
|
||||
self._backend = newbackend
|
||||
|
||||
def define_name(self, newname):
|
||||
oldconnkey = self.get_connkey()
|
||||
oldname = self.get_xmlobj().name
|
||||
|
||||
self.ensure_latest_xml()
|
||||
@@ -126,10 +118,10 @@ class vmmLibvirtObject(vmmGObject):
         newxml = xmlobj.get_xml()

         try:
-            self._key = newname
-            self.conn.rename_object(self, origxml, newxml, oldconnkey)
+            self._name = newname
+            self.conn.rename_object(self, origxml, newxml)
         except Exception: # pragma: no cover
-            self._key = oldname
+            self._name = oldname
             raise
         finally:
             self.__force_refresh_xml()
@@ -163,13 +155,8 @@ class vmmLibvirtObject(vmmGObject):
         ignore = force

     def get_name(self):
-        if self._name is None:
-            self._name = self._backend_get_name()
         return self._name

-    def _backend_get_name(self):
-        return self._backend.name()
-
     def tick(self, stats_update=True):
         ignore = stats_update
         self._refresh_status()
@@ -361,8 +348,6 @@ class vmmLibvirtObject(vmmGObject):
         Mark cached XML as invalid. Subclasses may extend this
         to invalidate any specific caches of their own
         """
-        self._name = None
-
         # While for events we do want to clear cached XML values like
         # _name, the XML is never invalid.
         self._is_xml_valid = self._using_events()
@@ -55,8 +55,6 @@ class vmmNodeDevice(vmmLibvirtObject):
         return self._backend.XMLDesc(flags)
     def _get_backend_status(self):
         return self._STATUS_ACTIVE
-    def _backend_get_name(self):
-        return self.get_connkey()
     def _using_events(self):
         return self.conn.using_node_device_events

@@ -240,15 +240,15 @@ class vmmStoragePool(vmmLibvirtObject):
     # Volume handling #
     ###################

+    def get_volume_by_name(self, name):
+        for vol in self.get_volumes():
+            if vol.get_name() == name:
+                return vol
+
     def get_volumes(self):
         self._update_volumes(force=False)
         return self._volumes[:]

-    def get_volume(self, key):
-        for vol in self.get_volumes():
-            if vol.get_connkey() == key:
-                return vol
-
     def _update_volumes(self, force):
         if not self.is_active():
             self._volumes = []
@@ -256,7 +256,7 @@ class vmmStoragePool(vmmLibvirtObject):
         if not force and self._volumes is not None:
             return

-        keymap = dict((o.get_connkey(), o) for o in self._volumes or [])
+        keymap = dict((o.get_name(), o) for o in self._volumes or [])
         def cb(obj, key):
             return vmmStorageVolume(self.conn, obj, key)
         (dummy1, dummy2, allvols) = pollhelpers.fetch_volumes(
@@ -37,8 +37,8 @@ class vmmStorageBrowser(vmmGObjectUI):
         if not self._first_run:
             self._first_run = True
             pool = self.conn.get_default_pool()
-            uiutil.set_list_selection(self.storagelist.widget("pool-list"),
-                pool and pool.get_connkey() or None)
+            uiutil.set_list_selection(
+                self.storagelist.widget("pool-list"), pool)

         self.topwin.set_transient_for(parent)
         self.topwin.present()
@@ -23,50 +23,50 @@ def _new_poll_helper(origmap, typename, list_cb, build_cb, support_cb):
         log.debug("Unable to list all %ss: %s", typename, e)

     for obj in objs:
-        connkey = obj.name()
+        name = obj.name()

-        if connkey not in origmap:
+        if name not in origmap:
             # Object is brand new this period
-            current[connkey] = build_cb(obj, connkey)
-            new[connkey] = current[connkey]
+            current[name] = build_cb(obj, name)
+            new[name] = current[name]
         else:
             # Previously known object
-            current[connkey] = origmap[connkey]
-            del(origmap[connkey])
+            current[name] = origmap[name]
+            del(origmap[name])

     return (list(origmap.values()), list(new.values()), list(current.values()))


 def fetch_nets(backend, origmap, build_cb):
-    name = "network"
+    typename = "network"
     list_cb = backend.listAllNetworks
     support_cb = backend.support.conn_network
-    return _new_poll_helper(origmap, name, list_cb, build_cb, support_cb)
+    return _new_poll_helper(origmap, typename, list_cb, build_cb, support_cb)


 def fetch_pools(backend, origmap, build_cb):
-    name = "pool"
+    typename = "pool"
     list_cb = backend.listAllStoragePools
     support_cb = backend.support.conn_storage
-    return _new_poll_helper(origmap, name, list_cb, build_cb, support_cb)
+    return _new_poll_helper(origmap, typename, list_cb, build_cb, support_cb)


 def fetch_volumes(backend, pool, origmap, build_cb):
-    name = "volume"
+    typename = "volume"
     list_cb = pool.listAllVolumes
     support_cb = backend.support.conn_storage
-    return _new_poll_helper(origmap, name, list_cb, build_cb, support_cb)
+    return _new_poll_helper(origmap, typename, list_cb, build_cb, support_cb)


 def fetch_nodedevs(backend, origmap, build_cb):
-    name = "nodedev"
+    typename = "nodedev"
     list_cb = backend.listAllDevices
     support_cb = backend.support.conn_nodedev
-    return _new_poll_helper(origmap, name, list_cb, build_cb, support_cb)
+    return _new_poll_helper(origmap, typename, list_cb, build_cb, support_cb)


 def fetch_vms(backend, origmap, build_cb):
-    name = "domain"
+    typename = "domain"
     list_cb = backend.listAllDomains
     support_cb = backend.support.conn_domain
-    return _new_poll_helper(origmap, name, list_cb, build_cb, support_cb)
+    return _new_poll_helper(origmap, typename, list_cb, build_cb, support_cb)