diff --git a/virt-manager b/virt-manager
index e157202c5..d352b90a8 100755
--- a/virt-manager
+++ b/virt-manager
@@ -121,7 +121,7 @@ def parse_commandline():
     return parser.parse_known_args()


-def launch_specific_window(engine, show, uri, uuid):
+def launch_specific_window(engine, show, uri, clistr):
     if not show:
         return

@@ -129,11 +129,11 @@ def launch_specific_window(engine, show, uri, uuid):
     if show == 'creator':
         engine.show_domain_creator(uri)
     elif show == 'editor':
-        engine.show_domain_editor(uri, uuid)
+        engine.show_domain_editor(uri, clistr)
     elif show == 'performance':
-        engine.show_domain_performance(uri, uuid)
+        engine.show_domain_performance(uri, clistr)
     elif show == 'console':
-        engine.show_domain_console(uri, uuid)
+        engine.show_domain_console(uri, clistr)
     elif show == 'summary':
         engine.show_host_summary(uri)
diff --git a/virtManager/clone.py b/virtManager/clone.py
index c748bd2b6..46b87a96f 100644
--- a/virtManager/clone.py
+++ b/virtManager/clone.py
@@ -304,7 +304,11 @@ class vmmCloneVM(vmmGObjectUI):
             label = _("Usermode")

         elif net_type == VirtualNetworkInterface.TYPE_VIRTUAL:
-            net = self.orig_vm.conn.get_net_by_name(net_dev)
+            net = None
+            for netobj in self.orig_vm.conn.list_nets():
+                if netobj.get_name() == net_dev:
+                    net = netobj
+                    break

             if net:
                 label = ""
diff --git a/virtManager/connection.py b/virtManager/connection.py
index 2a36b3bad..1e48ad91f 100644
--- a/virtManager/connection.py
+++ b/virtManager/connection.py
@@ -109,19 +109,19 @@ class vmmConnection(vmmGObject):
         self._xml_flags = {}

         # Physical network interfaces: name -> virtinst.NodeDevice
-        self.nodedevs = {}
+        self._nodedevs = {}
         # Physical network interfaces: name (eth0) -> vmmNetDevice
-        self.netdevs = {}
+        self._netdevs = {}
         # Physical media devices: vmmMediaDevice.key -> vmmMediaDevice
-        self.mediadevs = {}
+        self._mediadevs = {}
         # Connection Storage pools: name -> vmmInterface
-        self.interfaces = {}
-        # Connection Storage pools: UUID -> vmmStoragePool
-        self.pools = {}
-        # Virtual networks UUUID -> vmmNetwork object
-        self.nets = {}
-        # Virtual machines. UUID -> vmmDomain object
-        self.vms = {}
+        self._interfaces = {}
+        # Connection Storage pools: name -> vmmStoragePool
+        self._pools = {}
+        # Virtual networks: name -> vmmNetwork object
+        self._nets = {}
+        # Virtual machines: name -> vmmDomain object
+        self._vms = {}

         # Resource utilization statistics
         self.record = []
         self.hostinfo = None
@@ -171,14 +171,14 @@ class vmmConnection(vmmGObject):
     def _init_virtconn(self):
         self._backend.cb_fetch_all_guests = (
             lambda: [obj.get_xmlobj(refresh_if_nec=False)
-                     for obj in self.vms.values()])
+                     for obj in self._vms.values()])
         self._backend.cb_fetch_all_pools = (
             lambda: [obj.get_xmlobj(refresh_if_nec=False)
-                     for obj in self.pools.values()])
+                     for obj in self._pools.values()])

         def fetch_all_vols():
             ret = []
-            for pool in self.pools.values():
+            for pool in self._pools.values():
                 for vol in pool.get_volumes(refresh=False).values():
                     try:
                         ret.append(vol.get_xmlobj(refresh_if_nec=False))
@@ -290,14 +290,14 @@ class vmmConnection(vmmGObject):
         handle_id = vmmGObject.connect(self, name, callback, *args)

         if name == "vm-added":
-            for uuid in self.vms.keys():
-                self.emit("vm-added", uuid)
+            for connkey in self._vms.keys():
+                self.emit("vm-added", connkey)
         elif name == "mediadev-added":
-            for dev in self.mediadevs.values():
+            for dev in self._mediadevs.values():
                 self.emit("mediadev-added", dev)
         elif name == "nodedev-added":
-            for key in self.nodedevs.keys():
-                self.emit("nodedev-added", key)
+            for connkey in self._nodedevs.keys():
+                self.emit("nodedev-added", connkey)

         return handle_id
@@ -584,6 +584,23 @@ class vmmConnection(vmmGObject):

         return self._get_flags_helper(iface, key, check_func)

+    def get_default_pool(self):
+        for p in self._pools.values():
+            if p.get_name() == "default":
+                return p
+        return None
+
+    def get_vol_by_path(self, path):
+        # path_exists will handle stuff like refreshing a busted pool
+        if not virtinst.VirtualDisk.path_exists(self.get_backend(), path):
+            return None
+
+        for pool in self._pools.values():
+            for vol in pool.get_volumes().values():
+                if vol.get_target_path() == path:
+                    return vol
+        return None
+

     ###################################
     # Connection state getter/setters #
     ###################################
@@ -631,6 +648,7 @@ class vmmConnection(vmmGObject):
     def is_connecting(self):
         return self.state == self.STATE_CONNECTING

+
     #################################
     # Libvirt object lookup methods #
     #################################
@@ -662,7 +680,7 @@ class vmmConnection(vmmGObject):
             if mac:
                 netdev_list[name].mac = mac

-        for name, iface in self.interfaces.items():
+        for name, iface in self._interfaces.items():
             interface_to_netdev(iface)

         for nodedev in self.get_nodedevs("net"):
@@ -682,21 +700,37 @@ class vmmConnection(vmmGObject):
         # XXX: How to handle added/removed signals to clients?
         return netdev_list

-    def get_vm(self, uuid):
-        return self.vms[uuid]
-    def get_net(self, uuid):
-        return self.nets[uuid]
-    def get_net_device(self, path):
-        return self.netdevs[path]
-    def get_pool(self, uuid):
-        return self.pools[uuid]
-    def get_interface(self, name):
-        return self.interfaces[name]
-    def get_nodedev(self, name):
-        return self.nodedevs[name]
+    def list_netdevs(self):
+        # Update netdev list
+        if self.netdev_use_libvirt:
+            self._netdevs = self._build_libvirt_netdev_list()
+        return self._netdevs.values()
+
+    def get_vm(self, connkey):
+        return self._vms[connkey]
+    def list_vms(self):
+        return self._vms.values()
+
+    def get_net(self, connkey):
+        return self._nets[connkey]
+    def list_nets(self):
+        return self._nets.values()
+
+    def get_pool(self, connkey):
+        return self._pools[connkey]
+    def list_pools(self):
+        return self._pools.values()
+
+    def get_interface(self, connkey):
+        return self._interfaces[connkey]
+    def list_interfaces(self):
+        return self._interfaces.values()
+
+    def get_nodedev(self, connkey):
+        return self._nodedevs[connkey]
     def get_nodedevs(self, devtype=None, devcap=None):
         retdevs = []
-        for dev in self.nodedevs.values():
+        for dev in self._nodedevs.values():
             xmlobj = dev.get_xmlobj()
             if devtype and xmlobj.device_type != devtype:
                 continue
@@ -734,50 +768,6 @@ class vmmConnection(vmmGObject):

         return count

-    def get_net_by_name(self, name):
-        for net in self.nets.values():
-            if net.get_name() == name:
-                return net
-
-    def get_pool_by_path(self, path):
-        for pool in self.pools.values():
-            if pool.get_target_path() == path:
-                return pool
-        return None
-
-    def get_pool_by_name(self, name):
-        for p in self.pools.values():
-            if p.get_name() == name:
-                return p
-        return None
-    def get_default_pool(self):
-        return self.get_pool_by_name("default")
-
-    def get_vol_by_path(self, path):
-        # path_exists will handle stuff like refreshing a busted pool
-        if not virtinst.VirtualDisk.path_exists(self.get_backend(), path):
-            return None
-
-        for pool in self.pools.values():
-            for vol in pool.get_volumes().values():
-                if vol.get_target_path() == path:
-                    return vol
-        return None
-
-    def list_vm_uuids(self):
-        return self.vms.keys()
-    def list_net_uuids(self):
-        return self.nets.keys()
-    def list_net_device_paths(self):
-        # Update netdev list
-        if self.netdev_use_libvirt:
-            self.netdevs = self._build_libvirt_netdev_list()
-        return self.netdevs.keys()
-    def list_pool_uuids(self):
-        return self.pools.keys()
-    def list_interface_names(self):
-        return self.interfaces.keys()
-

     ###################################
     # Libvirt object creation methods #
     ###################################
@@ -852,7 +842,7 @@ class vmmConnection(vmmGObject):
         ignore = conn
         ignore = args

-        obj = self.vms.get(domain.UUIDString(), None)
+        obj = self._vms.get(domain.name(), None)
         if not obj:
             return
         self.idle_add(obj.refresh_xml, True)
@@ -861,7 +851,7 @@ class vmmConnection(vmmGObject):
         ignore = conn
         ignore = reason
         ignore = userdata
-        obj = self.vms.get(domain.UUIDString(), None)
+        obj = self._vms.get(domain.name(), None)

         if obj:
             # If the domain disappeared, this will catch it and trigger
@@ -877,7 +867,7 @@ class vmmConnection(vmmGObject):
         ignore = conn
         ignore = reason
         ignore = userdata
-        obj = self.nets.get(network.UUIDString(), None)
+        obj = self._nets.get(network.name(), None)

         if obj:
             self.idle_add(obj.force_update_status, True)
@@ -942,7 +932,7 @@ class vmmConnection(vmmGObject):
     ####################

     def _nodedev_mediadev_added(self, ignore1, name):
-        if name in self.mediadevs:
+        if name in self._mediadevs:
             return

         vobj = self.get_nodedev(name)
@@ -950,16 +940,16 @@ class vmmConnection(vmmGObject):
         if not mediadev:
             return

-        self.mediadevs[name] = mediadev
+        self._mediadevs[name] = mediadev
         logging.debug("mediadev=%s added", name)
         self.emit("mediadev-added", mediadev)

     def _nodedev_mediadev_removed(self, ignore1, name):
-        if name not in self.mediadevs:
+        if name not in self._mediadevs:
             return

-        self.mediadevs[name].cleanup()
-        del(self.mediadevs[name])
+        self._mediadevs[name].cleanup()
+        del(self._mediadevs[name])
         logging.debug("mediadev=%s removed", name)
         self.emit("mediadev-removed", name)
@@ -1001,26 +991,26 @@ class vmmConnection(vmmGObject):
             self._backend.close()
         self.record = []

-        cleanup(self.nodedevs)
-        self.nodedevs = {}
+        cleanup(self._nodedevs)
+        self._nodedevs = {}

-        cleanup(self.netdevs)
-        self.netdevs = {}
+        cleanup(self._netdevs)
+        self._netdevs = {}

-        cleanup(self.mediadevs)
-        self.mediadevs = {}
+        cleanup(self._mediadevs)
+        self._mediadevs = {}

-        cleanup(self.interfaces)
-        self.interfaces = {}
+        cleanup(self._interfaces)
+        self._interfaces = {}

-        cleanup(self.pools)
-        self.pools = {}
+        cleanup(self._pools)
+        self._pools = {}

-        cleanup(self.nets)
-        self.nets = {}
+        cleanup(self._nets)
+        self._nets = {}

-        cleanup(self.vms)
-        self.vms = {}
+        cleanup(self._vms)
+        self._vms = {}

         self._change_state(self.STATE_DISCONNECTED)
         self._closing = False
@@ -1146,33 +1136,33 @@ class vmmConnection(vmmGObject):

     def _update_nets(self, dopoll):
         if not dopoll or not self.is_network_capable():
-            return {}, {}, self.nets
-        return pollhelpers.fetch_nets(self._backend, self.nets.copy(),
+            return {}, {}, self._nets
+        return pollhelpers.fetch_nets(self._backend, self._nets.copy(),
                     (lambda obj, key: vmmNetwork(self, obj, key)))

     def _update_pools(self, dopoll):
         if not dopoll or not self.is_storage_capable():
-            return {}, {}, self.pools
-        return pollhelpers.fetch_pools(self._backend, self.pools.copy(),
+            return {}, {}, self._pools
+        return pollhelpers.fetch_pools(self._backend, self._pools.copy(),
                     (lambda obj, key: vmmStoragePool(self, obj, key)))

     def _update_interfaces(self, dopoll):
         if not dopoll or not self.is_interface_capable():
-            return {}, {}, self.interfaces
+            return {}, {}, self._interfaces
         return pollhelpers.fetch_interfaces(self._backend,
-                    self.interfaces.copy(),
+                    self._interfaces.copy(),
                     (lambda obj, key: vmmInterface(self, obj, key)))

     def _update_nodedevs(self, dopoll):
         if not dopoll or not self.is_nodedev_capable():
-            return {}, {}, self.nodedevs
-        return pollhelpers.fetch_nodedevs(self._backend, self.nodedevs.copy(),
+            return {}, {}, self._nodedevs
+        return pollhelpers.fetch_nodedevs(self._backend, self._nodedevs.copy(),
                     (lambda obj, key: vmmNodeDevice(self, obj, key)))

     def _update_vms(self, dopoll):
         if not dopoll:
-            return {}, {}, self.vms
-        return pollhelpers.fetch_vms(self._backend, self.vms.copy(),
+            return {}, {}, self._vms
+        return pollhelpers.fetch_vms(self._backend, self._vms.copy(),
                     (lambda obj, key: vmmDomain(self, obj, key)))
@@ -1263,15 +1253,15 @@ class vmmConnection(vmmGObject):
             return

         if pollvm:
-            self.vms = vms
+            self._vms = vms
         if pollnet:
-            self.nets = nets
+            self._nets = nets
         if polliface:
-            self.interfaces = interfaces
+            self._interfaces = interfaces
         if pollpool:
-            self.pools = pools
+            self._pools = pools
         if pollnodedev:
-            self.nodedevs = nodedevs
+            self._nodedevs = nodedevs

         # Make sure device polling is setup
         if not self.netdev_initialized:
@@ -1281,40 +1271,40 @@ class vmmConnection(vmmGObject):
             self._init_mediadev()

         # Update VM states
-        for uuid, obj in goneVMs.items():
+        for connkey, obj in goneVMs.items():
             logging.debug("domain=%s removed", obj.get_name())
-            self.emit("vm-removed", uuid)
+            self.emit("vm-removed", connkey)
             obj.cleanup()
-        for uuid, obj in newVMs.items():
+        for connkey, obj in newVMs.items():
             logging.debug("domain=%s status=%s added", obj.get_name(),
                           obj.run_status())
-            self.emit("vm-added", uuid)
+            self.emit("vm-added", connkey)

         # Update virtual network states
-        for uuid, obj in goneNets.items():
+        for connkey, obj in goneNets.items():
             logging.debug("network=%s removed", obj.get_name())
-            self.emit("net-removed", uuid)
+            self.emit("net-removed", connkey)
             obj.cleanup()
-        for uuid, obj in newNets.items():
+        for connkey, obj in newNets.items():
             logging.debug("network=%s added", obj.get_name())
             obj.connect("started", self._obj_signal_proxy,
-                        "net-started", uuid)
+                        "net-started", connkey)
             obj.connect("stopped", self._obj_signal_proxy,
-                        "net-stopped", uuid)
-            self.emit("net-added", uuid)
+                        "net-stopped", connkey)
+            self.emit("net-added", connkey)

         # Update storage pool states
-        for uuid, obj in gonePools.items():
+        for connkey, obj in gonePools.items():
             logging.debug("pool=%s removed", obj.get_name())
-            self.emit("pool-removed", uuid)
+            self.emit("pool-removed", connkey)
             obj.cleanup()
-        for uuid, obj in newPools.items():
+        for connkey, obj in newPools.items():
             logging.debug("pool=%s added", obj.get_name())
             obj.connect("started", self._obj_signal_proxy,
-                        "pool-started", uuid)
+                        "pool-started", connkey)
             obj.connect("stopped", self._obj_signal_proxy,
-                        "pool-stopped", uuid)
-            self.emit("pool-added", uuid)
+                        "pool-stopped", connkey)
+            self.emit("pool-added", connkey)

         # Update interface states
         for name, obj in goneInterfaces.items():
@@ -1361,7 +1351,7 @@ class vmmConnection(vmmGObject):
         if pollnodedev:
             add_to_ticklist(nodedevs.values())
         if pollmedia:
-            add_to_ticklist(self.mediadevs.values())
+            add_to_ticklist(self._mediadevs.values())

         for obj, args in ticklist:
             try:
diff --git a/virtManager/create.py b/virtManager/create.py
index a9c77b8b6..8cd42e301 100644
--- a/virtManager/create.py
+++ b/virtManager/create.py
@@ -1105,7 +1105,7 @@ class vmmCreate(vmmGObjectUI):

         if row:
             ntype = row[0]
-            key = row[6]
+            connkey = row[6]

         expand = (ntype != "network" and ntype != "bridge")
         if (ntype is None or
@@ -1114,7 +1114,7 @@ class vmmCreate(vmmGObjectUI):
         elif ntype != virtinst.VirtualNetworkInterface.TYPE_VIRTUAL:
             show_pxe_warn = False
         else:
-            obj = self.conn.get_net(key)
+            obj = self.conn.get_net(connkey)
             show_pxe_warn = not obj.can_pxe()

         show_warn = (show_pxe_warn and pxe_install)
@@ -1478,7 +1478,7 @@ class vmmCreate(vmmGObjectUI):
             self.conn.get_backend().lookupByName,
             start_num=force_num and 1 or 2, force_num=force_num,
             sep=not force_num and "-" or "",
-            collidelist=[vm.get_name() for vm in self.conn.vms.values()])
+            collidelist=[vm.get_name() for vm in self.conn.list_vms()])

     def validate_install_page(self):
         instmethod = self.get_config_install_page()
@@ -1827,7 +1827,7 @@ class vmmCreate(vmmGObjectUI):
         self.close()

         # Launch details dialog for new VM
-        self.emit("action-show-domain", self.conn.get_uri(), self.guest.uuid)
+        self.emit("action-show-domain", self.conn.get_uri(), self.guest.name)


     def start_install(self, guest):
@@ -1854,11 +1854,20 @@ class vmmCreate(vmmGObjectUI):
         # Wait for VM to show up
         self.conn.schedule_priority_tick(pollvm=True)
         count = 0
-        while (guest.uuid not in self.conn.vms) and (count < 100):
+        foundvm = None
+        while count < 100:
+            for vm in self.conn.list_vms():
+                if vm.get_uuid() == guest.uuid:
+                    foundvm = vm
+            if foundvm:
+                break
             count += 1
             time.sleep(.1)

-        vm = self.conn.get_vm(guest.uuid)
+        if not foundvm:
+            raise RuntimeError(
+                _("VM '%s' didn't show up after expected time.") % guest.name)
+        vm = foundvm

         vm.tick()
         if vm.is_shutoff():
diff --git a/virtManager/createinterface.py b/virtManager/createinterface.py
index 8368a998c..5faa6ee9a 100644
--- a/virtManager/createinterface.py
+++ b/virtManager/createinterface.py
@@ -166,8 +166,7 @@ class vmmCreateInterface(vmmGObjectUI):
     @staticmethod
     def iface_in_use_by(conn, name):
         use_str = ""
-        for i in conn.list_interface_names():
-            iface = conn.get_interface(i)
+        for iface in conn.list_interfaces():
             if name in iface.get_slave_names():
                 if use_str:
                     use_str += ", "
@@ -509,8 +508,8 @@ class vmmCreateInterface(vmmGObjectUI):
                 phys.address]

         row_dict = {}
-        for name in self.conn.list_interface_names():
-            iface = self.conn.get_interface(name)
+        for iface in self.conn.list_interfaces():
+            name = iface.get_name()
             key = iface.get_xmlobj()
             iface_type = iface.get_type()
             active = iface.is_active()
diff --git a/virtManager/createnet.py b/virtManager/createnet.py
index 7e12527f4..e99939296 100644
--- a/virtManager/createnet.py
+++ b/virtManager/createnet.py
@@ -169,10 +169,9 @@ class vmmCreateNetwork(vmmGObjectUI):
         fw_model = self.widget("net-forward").get_model()
         fw_model.clear()
         fw_model.append([_("Any physical device"), None])
-        for path in self.conn.list_net_device_paths():
-            net = self.conn.get_net_device(path)
-            fw_model.append([_("Physical device %s") % (net.get_name()),
-                             net.get_name()])
+        for netdev in self.conn.list_netdevs():
+            fw_model.append([_("Physical device %s") % (netdev.get_name()),
+                             netdev.get_name()])

         self.widget("net-forward").set_active(0)
         self.widget("net-forward-mode").set_active(0)
diff --git a/virtManager/details.py b/virtManager/details.py
index b6b7fe2dd..ae727a5db 100644
--- a/virtManager/details.py
+++ b/virtManager/details.py
@@ -1453,11 +1453,11 @@ class vmmDetails(vmmGObjectUI):
         if not self.vm.is_paused():
             self.emit("action-suspend-domain",
                       self.vm.conn.get_uri(),
-                      self.vm.get_uuid())
+                      self.vm.get_connkey())
         else:
             self.emit("action-resume-domain",
                       self.vm.conn.get_uri(),
-                      self.vm.get_uuid())
+                      self.vm.get_connkey())

     def control_vm_menu(self, src_ignore):
         can_usb = bool(self.console.viewer and
@@ -1467,39 +1467,39 @@ class vmmDetails(vmmGObjectUI):

     def control_vm_run(self, src_ignore):
         self.emit("action-run-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_shutdown(self, src_ignore):
         self.emit("action-shutdown-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_reboot(self, src_ignore):
         self.emit("action-reboot-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_save(self, src_ignore):
         self.emit("action-save-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_reset(self, src_ignore):
         self.emit("action-reset-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_destroy(self, src_ignore):
         self.emit("action-destroy-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_clone(self, src_ignore):
         self.emit("action-clone-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_migrate(self, src_ignore):
         self.emit("action-migrate-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_delete(self, src_ignore):
         self.emit("action-delete-domain",
-                  self.vm.conn.get_uri(), self.vm.get_uuid())
+                  self.vm.conn.get_uri(), self.vm.get_connkey())

     def control_vm_screenshot(self, src):
         ignore = src
diff --git a/virtManager/domain.py b/virtManager/domain.py
index 7f736d9ea..5ab2c51e4 100644
--- a/virtManager/domain.py
+++ b/virtManager/domain.py
@@ -171,8 +171,9 @@ class vmmDomainSnapshot(vmmLibvirtObject):

         self.refresh_xml()

-    def get_name(self):
-        return self.get_xmlobj().name
+    def _backend_get_name(self):
+        return self._backend.getName()
+
     def _XMLDesc(self, flags):
         return self._backend.getXMLDesc(flags=flags)

@@ -282,7 +283,6 @@ class vmmDomain(vmmLibvirtObject):
     def __init__(self, conn, backend, key):
         vmmLibvirtObject.__init__(self, conn, backend, key, Guest)

-        self.uuid = key
         self.cloning = False

         self.record = []
@@ -297,7 +297,7 @@ class vmmDomain(vmmLibvirtObject):
         self.reboot_listener = None
         self._is_management_domain = None
         self._id = None
-        self._name = None
+        self._uuid = None
         self._snapshot_list = None

         self.lastStatus = libvirt.VIR_DOMAIN_SHUTOFF
@@ -412,11 +412,6 @@ class vmmDomain(vmmLibvirtObject):
     def _using_events(self):
         return self.conn.using_domain_events

-    def get_name(self):
-        if self._name is None:
-            self._name = self._backend.name()
-        return self._name
-
     def get_id(self):
         if self._id is None:
             self._id = self._backend.ID()
@@ -1066,7 +1061,9 @@ class vmmDomain(vmmLibvirtObject):
         return self.get_xmlobj().os.is_hvm()

     def get_uuid(self):
-        return self.uuid
+        if self._uuid is None:
+            self._uuid = self._backend.UUIDString()
+        return self._uuid
     def get_abi_type(self):
         return self.get_xmlobj().os.os_type
     def get_hv_type(self):
@@ -1732,37 +1729,37 @@ class vmmDomain(vmmLibvirtObject):
     ##################

     def on_console_scaling_changed(self, *args, **kwargs):
-        return self.config.listen_pervm(self.uuid, "/scaling",
+        return self.config.listen_pervm(self.get_uuid(), "/scaling",
                                         *args, **kwargs)
     def set_console_scaling(self, value):
-        self.config.set_pervm(self.uuid, "/scaling", value)
+        self.config.set_pervm(self.get_uuid(), "/scaling", value)
     def get_console_scaling(self):
-        ret = self.config.get_pervm(self.uuid, "/scaling")
+        ret = self.config.get_pervm(self.get_uuid(), "/scaling")
         if ret == -1:
             return self.config.get_console_scaling()
         return ret

     def on_console_resizeguest_changed(self, *args, **kwargs):
-        return self.config.listen_pervm(self.uuid, "/resize-guest",
+        return self.config.listen_pervm(self.get_uuid(), "/resize-guest",
                                         *args, **kwargs)
     def set_console_resizeguest(self, value):
-        self.config.set_pervm(self.uuid, "/resize-guest", value)
+        self.config.set_pervm(self.get_uuid(), "/resize-guest", value)
     def get_console_resizeguest(self):
-        ret = self.config.get_pervm(self.uuid, "/resize-guest")
+        ret = self.config.get_pervm(self.get_uuid(), "/resize-guest")
         if ret == -1:
             return self.config.get_console_resizeguest()
         return ret

     def set_details_window_size(self, w, h):
-        self.config.set_pervm(self.uuid, "/vm-window-size", (w, h))
+        self.config.set_pervm(self.get_uuid(), "/vm-window-size", (w, h))
     def get_details_window_size(self):
-        ret = self.config.get_pervm(self.uuid, "/vm-window-size")
+        ret = self.config.get_pervm(self.get_uuid(), "/vm-window-size")
         return ret

     def get_console_password(self):
-        return self.config.get_pervm(self.uuid, "/console-password")
+        return self.config.get_pervm(self.get_uuid(), "/console-password")
     def set_console_password(self, username, keyid):
-        return self.config.set_pervm(self.uuid, "/console-password",
+        return self.config.set_pervm(self.get_uuid(), "/console-password",
                                      (username, keyid))

     def get_cache_dir(self):
@@ -1963,6 +1960,8 @@ class vmmDomainVirtinst(vmmDomain):

     def get_name(self):
         return self._backend.name
+    def get_uuid(self):
+        return self._backend.uuid
     def get_id(self):
         return -1
     def hasSavedImage(self):
diff --git a/virtManager/engine.py b/virtManager/engine.py
index 7aff87004..d3a49d7dc 100644
--- a/virtManager/engine.py
+++ b/virtManager/engine.py
@@ -274,13 +274,13 @@ class vmmEngine(vmmGObject):

         thread.start()

-    def _do_vm_removed(self, conn, vmuuid):
+    def _do_vm_removed(self, conn, connkey):
         hvuri = conn.get_uri()
-        if vmuuid not in self.conns[hvuri]["windowDetails"]:
+        if connkey not in self.conns[hvuri]["windowDetails"]:
             return

-        self.conns[hvuri]["windowDetails"][vmuuid].cleanup()
-        del(self.conns[hvuri]["windowDetails"][vmuuid])
+        self.conns[hvuri]["windowDetails"][connkey].cleanup()
+        del(self.conns[hvuri]["windowDetails"][connkey])

     def _do_conn_changed(self, conn):
         if (conn.get_state() == conn.STATE_ACTIVE or
@@ -289,9 +289,9 @@ class vmmEngine(vmmGObject):

         hvuri = conn.get_uri()

-        for vmuuid in self.conns[hvuri]["windowDetails"].keys():
-            self.conns[hvuri]["windowDetails"][vmuuid].cleanup()
-            del(self.conns[hvuri]["windowDetails"][vmuuid])
+        for connkey in self.conns[hvuri]["windowDetails"].keys():
+            self.conns[hvuri]["windowDetails"][connkey].cleanup()
+            del(self.conns[hvuri]["windowDetails"][connkey])

         if (self.windowCreate and self.windowCreate.conn and
@@ -671,8 +671,8 @@ class vmmEngine(vmmGObject):
         if self.conns[uri]["windowHost"]:
             return self.conns[uri]["windowHost"]

-        con = self._lookup_conn(uri)
-        obj = vmmHost(con)
+        conn = self._lookup_conn(uri)
+        obj = vmmHost(conn)

         obj.connect("action-exit-app", self.exit_app)
         obj.connect("action-view-manager", self._do_show_manager)
@@ -722,13 +722,13 @@ class vmmEngine(vmmGObject):

         self.remove_conn(None, connection.get_uri())

-    def _get_details_dialog(self, uri, uuid):
-        if uuid in self.conns[uri]["windowDetails"]:
-            return self.conns[uri]["windowDetails"][uuid]
+    def _get_details_dialog(self, uri, connkey):
+        if connkey in self.conns[uri]["windowDetails"]:
+            return self.conns[uri]["windowDetails"][connkey]

-        con = self._lookup_conn(uri)
+        conn = self._lookup_conn(uri)

-        obj = vmmDetails(con.get_vm(uuid))
+        obj = vmmDetails(conn.get_vm(connkey))
         obj.connect("action-save-domain", self._do_save_domain)
         obj.connect("action-destroy-domain", self._do_destroy_domain)
         obj.connect("action-reset-domain", self._do_reset_domain)
@@ -745,33 +745,12 @@ class vmmEngine(vmmGObject):
         obj.connect("details-opened", self.increment_window_counter)
         obj.connect("details-closed", self.decrement_window_counter)

-        self.conns[uri]["windowDetails"][uuid] = obj
-        return self.conns[uri]["windowDetails"][uuid]
+        self.conns[uri]["windowDetails"][connkey] = obj
+        return self.conns[uri]["windowDetails"][connkey]

-    def _find_vm_by_id(self, uri, domstr):
-        vms = self.conns[uri]["conn"].vms
-        if domstr in vms:
-            return domstr
-        for vm in vms.values():
-            if domstr.isdigit():
-                if int(domstr) == vm.get_id():
-                    return vm.get_uuid()
-            elif domstr == vm.get_name():
-                return vm.get_uuid()
-
-    def _show_vm_helper(self, src, uri, domstr, page=None, forcepage=False):
+    def _show_vm_helper(self, src, uri, vm, page, forcepage):
         try:
-            uuid = self._find_vm_by_id(uri, domstr)
-            if not uuid:
-                # This will only happen if --show-* option was used during
-                # virt-manager launch and an invalid UUID is passed.
-                # The error message must be sync otherwise the user will not
-                # know why the application ended.
-                self.err.show_err("%s does not have VM '%s'" %
-                    (uri, domstr), modal=True)
-                return
-
-            details = self._get_details_dialog(uri, uuid)
+            details = self._get_details_dialog(uri, vm.get_connkey())

             if forcepage or not details.is_visible():
                 if page == DETAILS_PERF:
@@ -790,8 +769,10 @@ class vmmEngine(vmmGObject):
             if self._can_exit():
                 self.idle_add(self.exit_app, src)

-    def _do_show_vm(self, src, uri, uuid):
-        self._show_vm_helper(src, uri, uuid)
+    def _do_show_vm(self, src, uri, connkey):
+        conn = self._lookup_conn(uri)
+        vm = conn.get_vm(connkey)
+        self._show_vm_helper(src, uri, vm, None, False)

     def get_manager(self):
         if self.windowManager:
@@ -858,10 +839,10 @@ class vmmEngine(vmmGObject):
         except Exception, e:
             src.err.show_err(_("Error launching manager: %s") % str(e))

-    def _do_show_migrate(self, src, uri, uuid):
+    def _do_show_migrate(self, src, uri, connkey):
         try:
             conn = self._lookup_conn(uri)
-            vm = conn.get_vm(uuid)
+            vm = conn.get_vm(connkey)

             if not self.windowMigrate:
                 self.windowMigrate = vmmMigrateDialog(vm, self)
@@ -871,9 +852,9 @@ class vmmEngine(vmmGObject):
         except Exception, e:
             src.err.show_err(_("Error launching migrate dialog: %s") % str(e))

-    def _do_show_clone(self, src, uri, uuid):
-        con = self._lookup_conn(uri)
-        orig_vm = con.get_vm(uuid)
+    def _do_show_clone(self, src, uri, connkey):
+        conn = self._lookup_conn(uri)
+        orig_vm = conn.get_vm(connkey)
         clone_window = self.conns[uri]["windowClone"]

         try:
@@ -901,26 +882,51 @@ class vmmEngine(vmmGObject):
         self.show_manager()
         self._do_show_create(self.get_manager(), uri)

-    def show_domain_console(self, uri, uuid):
-        self.idle_add(self._show_vm_helper, self.get_manager(), uri, uuid,
-                      page=DETAILS_CONSOLE, forcepage=True)
+    def _find_vm_by_cli_str(self, uri, clistr):
+        """
+        Lookup a VM by a string passed in on the CLI. Can be either
+        ID, domain name, or UUID
+        """
+        if clistr.isdigit():
+            clistr = int(clistr)

-    def show_domain_editor(self, uri, uuid):
-        self.idle_add(self._show_vm_helper, self.get_manager(), uri, uuid,
-                      page=DETAILS_CONFIG, forcepage=True)
+        for vm in self.conns[uri]["conn"].list_vms():
+            if clistr == vm.get_id():
+                return vm
+            elif clistr == vm.get_name():
+                return vm
+            elif clistr == vm.get_uuid():
+                return vm

-    def show_domain_performance(self, uri, uuid):
-        self.idle_add(self._show_vm_helper, self.get_manager(), uri, uuid,
-                      page=DETAILS_PERF, forcepage=True)
+    def _cli_show_vm_helper(self, uri, clistr, page):
+        src = self.get_manager()
+
+        vm = self._find_vm_by_cli_str(uri, clistr)
+        if not vm:
+            src.err.show_err("%s does not have VM '%s'" %
+                (uri, clistr), modal=True)
+            return
+
+        self._show_vm_helper(src, uri, vm, page, True)
+
+    def show_domain_console(self, uri, clistr):
+        self.idle_add(self._cli_show_vm_helper, uri, clistr, DETAILS_CONSOLE)
+
+    def show_domain_editor(self, uri, clistr):
+        self.idle_add(self._cli_show_vm_helper, uri, clistr, DETAILS_CONFIG)
+
+    def show_domain_performance(self, uri, clistr):
+        self.idle_add(self._cli_show_vm_helper, uri, clistr, DETAILS_PERF)

     #######################################
     # Domain actions run/destroy/save ... #
     #######################################

-    def _do_save_domain(self, src, uri, uuid):
+    def _do_save_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)
         managed = bool(vm.managedsave_supported)

         if not managed and conn.is_remote():
@@ -993,9 +999,9 @@ class vmmEngine(vmmGObject):
         vmmAsyncJob.simple_async_noshow(conn.restore, [path], src,
                                         _("Error restoring domain"))

-    def _do_destroy_domain(self, src, uri, uuid):
+    def _do_destroy_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         if not src.err.chkbox_helper(
             self.config.get_confirm_forcepoweroff,
@@ -1010,9 +1016,9 @@ class vmmEngine(vmmGObject):
         vmmAsyncJob.simple_async_noshow(vm.destroy, [], src,
                                         _("Error shutting down domain"))

-    def _do_suspend_domain(self, src, uri, uuid):
+    def _do_suspend_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         if not src.err.chkbox_helper(self.config.get_confirm_pause,
             self.config.set_confirm_pause,
@@ -1024,17 +1030,17 @@ class vmmEngine(vmmGObject):
         vmmAsyncJob.simple_async_noshow(vm.suspend, [], src,
                                         _("Error pausing domain"))

-    def _do_resume_domain(self, src, uri, uuid):
+    def _do_resume_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         logging.debug("Unpausing vm '%s'", vm.get_name())
         vmmAsyncJob.simple_async_noshow(vm.resume, [], src,
                                         _("Error unpausing domain"))

-    def _do_run_domain(self, src, uri, uuid):
+    def _do_run_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         logging.debug("Starting vm '%s'", vm.get_name())

@@ -1057,7 +1063,7 @@ class vmmEngine(vmmGObject):

                 try:
                     vm.removeSavedImage()
-                    self._do_run_domain(src, uri, uuid)
+                    self._do_run_domain(src, uri, connkey)
                 except Exception, e:
                     src.err.show_err(_("Error removing domain state: %s")
                                      % str(e))
@@ -1073,9 +1079,9 @@ class vmmEngine(vmmGObject):
             errorintro = _("Error starting domain")
         vmmAsyncJob.simple_async_noshow(vm.startup, [], src, errorintro)

-    def _do_shutdown_domain(self, src, uri, uuid):
+    def _do_shutdown_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         if not src.err.chkbox_helper(self.config.get_confirm_poweroff,
             self.config.set_confirm_poweroff,
@@ -1087,9 +1093,9 @@ class vmmEngine(vmmGObject):
         vmmAsyncJob.simple_async_noshow(vm.shutdown, [], src,
                                         _("Error shutting down domain"))

-    def _do_reboot_domain(self, src, uri, uuid):
+    def _do_reboot_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         if not src.err.chkbox_helper(self.config.get_confirm_poweroff,
             self.config.set_confirm_poweroff,
@@ -1126,9 +1132,9 @@ class vmmEngine(vmmGObject):

         vmmAsyncJob.simple_async_noshow(reboot_cb, [], src, "")

-    def _do_reset_domain(self, src, uri, uuid):
+    def _do_reset_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         if not src.err.chkbox_helper(
             self.config.get_confirm_forcepoweroff,
@@ -1143,9 +1149,9 @@ class vmmEngine(vmmGObject):
         vmmAsyncJob.simple_async_noshow(vm.reset, [], src,
                                         _("Error resetting domain"))

-    def _do_delete_domain(self, src, uri, uuid):
+    def _do_delete_domain(self, src, uri, connkey):
         conn = self._lookup_conn(uri)
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)

         try:
             if not self.delete_dialog:
diff --git a/virtManager/host.py b/virtManager/host.py
index 0884834ee..416692960 100644
--- a/virtManager/host.py
+++ b/virtManager/host.py
@@ -505,25 +505,25 @@ class vmmHost(vmmGObjectUI):
         self.enable_net_apply(EDIT_NET_AUTOSTART)

     def current_network(self):
-        key = uiutil.get_list_selection(self.widget("net-list"), 0)
+        connkey = uiutil.get_list_selection(self.widget("net-list"), 0)
         try:
-            return key and self.conn.get_net(key)
+            return connkey and self.conn.get_net(connkey)
         except KeyError:
             return None

-    def refresh_network(self, src_ignore, uuid):
+    def refresh_network(self, src_ignore, connkey):
         uilist = self.widget("net-list")
         sel = uilist.get_selection()
         model, treeiter = sel.get_selected()
-        net = self.conn.get_net(uuid)
+        net = self.conn.get_net(connkey)
         net.tick()

         for row in uilist.get_model():
-            if row[0] == uuid:
+            if row[0] == connkey:
                 row[4] = net.is_active()

         if treeiter is not None:
-            if model[treeiter][0] == uuid:
+            if model[treeiter][0] == connkey:
                 self.net_selected(sel)

     def set_net_error_page(self, msg):
@@ -538,8 +538,10 @@ class vmmHost(vmmGObjectUI):
             return

         self.widget("network-pages").set_current_page(0)
+        connkey = model[treeiter][0]
+
         try:
-            net = self.conn.get_net(model[treeiter][0])
+            net = self.conn.get_net(connkey)
         except KeyError:
             self.disable_net_apply()
             return
@@ -670,7 +672,9 @@ class vmmHost(vmmGObjectUI):
                 _("Isolated network"))
         self.disable_net_apply()

-    def repopulate_networks(self, src_ignore=None, uuid_ignore=None):
+    def repopulate_networks(self, src=None, connkey=None):
+        ignore = src
+        ignore = connkey
         self.populate_networks(self.widget("net-list").get_model())

     def populate_networks(self, model):
@@ -679,14 +683,13 @@ class vmmHost(vmmGObjectUI):
         net_list = self.widget("net-list")
         net_list.get_selection().unselect_all()
         model.clear()
-        for uuid in self.conn.list_net_uuids():
-            net = self.conn.get_net(uuid)
-            model.append([uuid, net.get_name(), "network-idle",
+        for net in self.conn.list_nets():
+            model.append([net.get_connkey(), net.get_name(), "network-idle",
                           Gtk.IconSize.LARGE_TOOLBAR,
                           bool(net.is_active())])

         uiutil.set_row_selection(net_list,
-            curnet and curnet.get_uuid() or None)
+            curnet and curnet.get_connkey() or None)


     # ------------------------------
@@ -800,12 +803,12 @@ class vmmHost(vmmGObjectUI):
         if cp is None:
             return
         cp.refresh()
-        self.refresh_storage_pool(None, cp.get_uuid())
+        self.refresh_storage_pool(None, cp.get_connkey())

     def current_pool(self):
-        key = uiutil.get_list_selection(self.widget("pool-list"), 0)
+        connkey = uiutil.get_list_selection(self.widget("pool-list"), 0)
         try:
-            return key and self.conn.get_pool(key)
+            return connkey and self.conn.get_pool(connkey)
         except KeyError:
             return None

@@ -814,9 +817,9 @@ class vmmHost(vmmGObjectUI):
         if not pool:
             return None

-        key = uiutil.get_list_selection(self.widget("vol-list"), 0)
+        connkey = uiutil.get_list_selection(self.widget("vol-list"), 0)
         try:
-            return key and pool.get_volume(key)
+            return connkey and pool.get_volume(connkey)
         except KeyError:
             return None

@@ -867,17 +870,17 @@ class vmmHost(vmmGObjectUI):
             return

         self.widget("storage-pages").set_current_page(0)
-        uuid = model[treeiter][0]
+        connkey = model[treeiter][0]

         try:
-            self.populate_pool_state(uuid)
+            self.populate_pool_state(connkey)
         except Exception, e:
             logging.exception(e)
             self.set_storage_error_page(_("Error selecting pool: %s") % e)
         self.disable_pool_apply()

-    def populate_pool_state(self, uuid):
-        pool = self.conn.get_pool(uuid)
+    def populate_pool_state(self, connkey):
+        pool = self.conn.get_pool(connkey)
         pool.tick()
         auto = pool.get_autostart()
         active = pool.is_active()
@@ -919,10 +922,11 @@ class vmmHost(vmmGObjectUI):
             self.widget("vol-add").set_tooltip_text(
                 _("Pool does not support volume creation"))

-    def refresh_storage_pool(self, src_ignore, uuid):
-        refresh_pool_in_list(self.widget("pool-list"), self.conn, uuid)
+    def refresh_storage_pool(self, src, connkey):
+        ignore = src
+        refresh_pool_in_list(self.widget("pool-list"), self.conn, connkey)
         curpool = self.current_pool()
-        if curpool.get_uuid() != uuid:
+        if curpool.get_connkey() != connkey:
             return

         # Currently selected pool changed state: force a 'pool_selected' to
@@ -976,7 +980,9 @@ class vmmHost(vmmGObjectUI):

         clipboard.set_text(target_path, -1)

-    def repopulate_storage_pools(self, src_ignore=None, uuid_ignore=None):
+    def repopulate_storage_pools(self, src=None, connkey=None):
+        ignore = src
+        ignore = connkey
         pool_list = self.widget("pool-list")
         populate_storage_pools(pool_list, self.conn, self.current_pool())

@@ -1053,9 +1059,9 @@ class vmmHost(vmmGObjectUI):
         self.refresh_interface(None, cp.get_name())

     def current_interface(self):
-        key = uiutil.get_list_selection(self.widget("interface-list"), 0)
+        connkey = uiutil.get_list_selection(self.widget("interface-list"), 0)
         try:
-            return key and self.conn.get_interface(key)
+            return connkey and self.conn.get_interface(connkey)
         except KeyError:
             return None

@@ -1094,10 +1100,10 @@ class vmmHost(vmmGObjectUI):
             return

         self.widget("interface-pages").set_current_page(INTERFACE_PAGE_INFO)
-        name = model[treeiter][0]
+        connkey = model[treeiter][0]

         try:
-            self.populate_interface_state(name)
+            self.populate_interface_state(connkey)
         except Exception, e:
             logging.exception(e)
             self.set_interface_error_page(_("Error selecting interface: %s") %
@@ -1105,8 +1111,9 @@ class vmmHost(vmmGObjectUI):
                                           e)
         self.widget("interface-apply").set_sensitive(False)

-    def populate_interface_state(self, name):
-        interface = self.conn.get_interface(name)
+    def populate_interface_state(self, connkey):
+        interface = self.conn.get_interface(connkey)
+        name = interface.get_name()
         children = interface.get_slaves()
         itype = interface.get_type()
         mac = interface.get_mac()
@@ -1189,11 +1196,14 @@ class vmmHost(vmmGObjectUI):
         self.widget("interface-child-box").set_visible(show_child)
         self.populate_interface_children()

-    def refresh_interface(self, src_ignore, name):
+    def refresh_interface(self, src, connkey):
+        ignore = src
+
         iface_list = self.widget("interface-list")
         sel = iface_list.get_selection()
         model, treeiter = sel.get_selected()
-        iface = self.conn.get_interface(name)
+        iface = self.conn.get_interface(connkey)
+        name = iface.get_name()
         iface.tick()

         for row in iface_list.get_model():
@@ -1211,7 +1221,9 @@ class vmmHost(vmmGObjectUI):
             self.widget("interface-start").set_sensitive(False)
         self.widget("interface-apply").set_sensitive(False)

-    def repopulate_interfaces(self, src_ignore=None, name_ignore=None):
+    def repopulate_interfaces(self, src=None, connkey=None):
+        ignore = src
+        ignore = connkey
         interface_list = self.widget("interface-list")
         self.populate_interfaces(interface_list.get_model())

@@ -1221,14 +1233,13 @@ class vmmHost(vmmGObjectUI):
         iface_list = self.widget("interface-list")
         iface_list.get_selection().unselect_all()
         model.clear()
-        for name in self.conn.list_interface_names():
-            iface = self.conn.get_interface(name)
-            model.append([name, iface.get_name(), "network-idle",
-                          Gtk.IconSize.LARGE_TOOLBAR,
+        for iface in self.conn.list_interfaces():
+            model.append([iface.get_connkey(), iface.get_name(),
+                          "network-idle", Gtk.IconSize.LARGE_TOOLBAR,
                           bool(iface.is_active())])

         uiutil.set_row_selection(iface_list,
-            curiface and curiface.get_name() or None)
+            curiface and curiface.get_connkey() or None)

     def populate_interface_children(self):
         interface = self.current_interface()
@@ -1266,13 +1277,15 @@ def init_pool_list(pool_list, changed_func):
     poolListModel.set_sort_column_id(1, Gtk.SortType.ASCENDING)


-def refresh_pool_in_list(pool_list, conn, uuid):
+def refresh_pool_in_list(pool_list, conn, connkey):
     for row in pool_list.get_model():
-        if row[0] == uuid:
-            # Update active sensitivity and percent available for passed uuid
-            row[3] = get_pool_size_percent(conn, uuid)
-            row[2] = conn.get_pool(uuid).is_active()
-            return
+        if row[0] != connkey:
+            continue
+
+        # Update active sensitivity and percent available for passed key
+        row[3] = get_pool_size_percent(conn, connkey)
+        row[2] = conn.get_pool(connkey).is_active()
+        return


def populate_storage_pools(pool_list, conn, curpool):
@@ -1281,19 +1294,20 @@ def populate_storage_pools(pool_list, conn, curpool):
     pool_list.set_model(None)
     pool_list.get_selection().unselect_all()
     model.clear()
-    for uuid in conn.list_pool_uuids():
-        per = get_pool_size_percent(conn, uuid)
-        pool = conn.get_pool(uuid)
+    for pool in conn.list_pools():
+        connkey = pool.get_connkey()
+        per = get_pool_size_percent(conn, connkey)
+        pool = conn.get_pool(connkey)

         name = pool.get_name()
         typ = StoragePool.get_pool_type_desc(pool.get_type())
         label = "%s\n%s" % (name, typ)

-        model.append([uuid, label, pool.is_active(), per])
+        model.append([connkey, label, pool.is_active(), per])

     pool_list.set_model(model)

     uiutil.set_row_selection(pool_list,
-        curpool and curpool.get_uuid() or None)
+        curpool and curpool.get_connkey() or None)


def populate_storage_volumes(list_widget, pool, sensitive_cb):
@@ -1333,8 +1347,8 @@ def populate_storage_volumes(list_widget, pool, sensitive_cb):
         model.append(row)


-def get_pool_size_percent(conn, uuid):
-    pool = conn.get_pool(uuid)
+def get_pool_size_percent(conn, connkey):
+    pool = conn.get_pool(connkey)
     cap = pool.get_capacity()
     alloc = pool.get_allocation()
     if not cap or alloc is None:
diff --git a/virtManager/inspection.py b/virtManager/inspection.py
index 743157ae3..2bc7b23bb 100644
--- a/virtManager/inspection.py
+++ b/virtManager/inspection.py
@@ -64,9 +64,9 @@ class vmmInspection(vmmGObject):
         self._q.put(obj)

     # Called by the main thread whenever a VM is added to vmlist.
-    def vm_added(self, conn, uuid):
+    def vm_added(self, conn, connkey):
         ignore = conn
-        ignore = uuid
+        ignore = connkey
         obj = ("vm_added")
         self._q.put(obj)

@@ -118,7 +118,7 @@ class vmmInspection(vmmGObject):
     # Any VMs we've not seen yet? If so, process them.
     def _process_vms(self):
         for conn in self._conns.itervalues():
-            for vmuuid in conn.list_vm_uuids():
+            for vm in conn.list_vms():
                 if not conn.is_active():
                     break
@@ -127,9 +127,9 @@ class vmmInspection(vmmGObject):
                     data.error = True
                     self._set_vm_inspection_data(vm, data)

+                vmuuid = vm.get_uuid()
                 prettyvm = vmuuid
                 try:
-                    vm = conn.get_vm(vmuuid)
                     prettyvm = conn.get_uri() + ":" + vm.get_name()

                     if vmuuid in self._vmseen:
diff --git a/virtManager/interface.py b/virtManager/interface.py
index 97d0aa0c4..55475a238 100644
--- a/virtManager/interface.py
+++ b/virtManager/interface.py
@@ -27,7 +27,6 @@ class vmmInterface(vmmLibvirtObject):
     def __init__(self, conn, backend, key):
         vmmLibvirtObject.__init__(self, conn, backend, key, Interface)

-        self._name = key
         self._active = True

         (self._inactive_xml_flags,
@@ -68,9 +67,6 @@ class vmmInterface(vmmLibvirtObject):
     def is_active(self):
         return self._active

-    def get_name(self):
-        return self._name
-
     def get_mac(self):
         return self.get_xmlobj().macaddr
diff --git a/virtManager/libvirtobject.py b/virtManager/libvirtobject.py
index 50d44e843..e3fc004ab 100644
--- a/virtManager/libvirtobject.py
+++ b/virtManager/libvirtobject.py
@@ -49,6 +49,10 @@ class vmmLibvirtObject(vmmGObject):
         self._inactive_xml_flags = 0
         self._active_xml_flags = 0

+        # Cache object name
+        self._name = None
+        self.get_name()
+
         self.connect("config-changed", self._reparse_xml)

     @staticmethod
@@ -75,7 +79,7 @@ class vmmLibvirtObject(vmmGObject):
     def get_backend(self):
         return self._backend

-    def get_key(self):
+    def get_connkey(self):
         return self._key

     def change_name_backend(self, newbackend):
@@ -107,8 +111,6 @@ class vmmLibvirtObject(vmmGObject):
     # Functions that should probably be overridden in sub class #
     #############################################################

-    def get_name(self):
-        raise NotImplementedError()
     def _XMLDesc(self, flags):
         raise NotImplementedError()
     def _using_events(self):
@@ -125,6 +127,14 @@ class vmmLibvirtObject(vmmGObject):
         ignore = from_event
         ignore = log

+    def get_name(self):
+        if self._name is None:
+            self._name = self._backend_get_name()
+        return self._name
+
+    def _backend_get_name(self):
+        return self._backend.name()
+

     ##################
     # Public XML API #
diff --git a/virtManager/manager.py b/virtManager/manager.py
index 1b54b026d..1ca84e481 100644
--- a/virtManager/manager.py
+++ b/virtManager/manager.py
@@ -116,7 +116,7 @@ class vmmManager(vmmGObjectUI):

         self.ignore_pause = False

-        # Mapping of VM UUID -> tree model rows to
+        # Mapping of rowkey -> tree model rows to
         # allow O(1) access instead of O(n)
         self.rows = {}

@@ -445,12 +445,6 @@ class vmmManager(vmmGObjectUI):
         else:
             return handle.conn

-    def current_vmuuid(self):
-        vm = self.current_vm()
-        if vm is None:
-            return None
-        return vm.get_uuid()
-
     def current_conn_uri(self, default_selection=False):
         vmlist = self.widget("vm-list")
         model = vmlist.get_model()
@@ -504,7 +498,7 @@ class vmmManager(vmmGObjectUI):
             return

         if vm:
-            self.emit("action-show-domain", conn.get_uri(), vm.get_uuid())
+            self.emit("action-show-domain", conn.get_uri(), vm.get_connkey())
         else:
             if not self.open_conn():
                 self.emit("action-show-host", conn.get_uri())
@@ -515,7 +509,7 @@ class vmmManager(vmmGObjectUI):
         if vm is None:
             self._do_delete_conn(conn)
         else:
-            self.emit("action-delete-domain", conn.get_uri(), vm.get_uuid())
+            self.emit("action-delete-domain", conn.get_uri(), vm.get_connkey())

     def _do_delete_conn(self, conn):
         if conn is None:
@@ -553,27 +547,28 @@ class vmmManager(vmmGObjectUI):

     def start_vm(self, ignore):
         vm = self.current_vm()
-        if vm is not None:
-            self.emit("action-run-domain",
-                      vm.conn.get_uri(), vm.get_uuid())
+        if vm is None:
+            return
+        self.emit("action-run-domain", vm.conn.get_uri(), vm.get_connkey())

     def poweroff_vm(self, ignore):
         vm = self.current_vm()
-        if vm is not None:
-            self.emit("action-shutdown-domain",
-                      vm.conn.get_uri(), vm.get_uuid())
+        if vm is None:
+            return
+        self.emit("action-shutdown-domain",
+                  vm.conn.get_uri(), vm.get_connkey())

     def pause_vm(self, ignore):
         vm = self.current_vm()
-        if vm is not None:
-            self.emit("action-suspend-domain",
-                      vm.conn.get_uri(), vm.get_uuid())
+        if vm is None:
+            return
+        self.emit("action-suspend-domain", vm.conn.get_uri(), vm.get_connkey())

     def resume_vm(self, ignore):
         vm = self.current_vm()
-        if vm is not None:
-            self.emit("action-resume-domain",
-                      vm.conn.get_uri(), vm.get_uuid())
+        if vm is None:
+            return
+        self.emit("action-resume-domain", vm.conn.get_uri(), vm.get_connkey())

     def close_conn(self, ignore):
         conn = self.current_conn()
@@ -594,8 +589,8 @@ class vmmManager(vmmGObjectUI):
     def vm_row_key(self, vm):
         return vm.get_uuid() + ":" + vm.conn.get_uri()

-    def vm_added(self, conn, vmuuid):
-        vm = conn.get_vm(vmuuid)
+    def vm_added(self, conn, connkey):
+        vm = conn.get_vm(connkey)
         if self.vm_row_key(vm) in self.rows:
             return

@@ -609,14 +604,14 @@ class vmmManager(vmmGObjectUI):

         self._append_vm(model, vm, conn)

-    def vm_removed(self, conn, vmuuid):
+    def vm_removed(self, conn, connkey):
         vmlist = self.widget("vm-list")
         model = vmlist.get_model()

         parent = self.rows[conn.get_uri()].iter
         for row in range(model.iter_n_children(parent)):
             vm = model[model.iter_nth_child(parent, row)][ROW_HANDLE]
-            if vm.get_uuid() == vmuuid:
+            if vm.get_connkey() == connkey:
                 model.remove(model.iter_nth_child(parent, row))
                 del self.rows[self.vm_row_key(vm)]
                 break
diff --git a/virtManager/netlist.py b/virtManager/netlist.py
index 2b3f72522..14ff60ef8 100644
--- a/virtManager/netlist.py
+++ b/virtManager/netlist.py
@@ -140,8 +140,7 @@ class vmmNetworkList(vmmGObjectUI):
         hasNet = False
         netIdxLabel = None

-        for uuid in self.conn.list_net_uuids():
-            net = self.conn.get_net(uuid)
+        for net in self.conn.list_nets():
             nettype = virtinst.VirtualNetworkInterface.TYPE_VIRTUAL

             label = self._pretty_network_desc(nettype, net.get_name(), net)
@@ -156,7 +155,7 @@ class vmmNetworkList(vmmGObjectUI):

             vnet_dict[label] = self._build_source_row(
                 nettype, net.get_name(), label, True,
-                net.is_active(), key=net.get_uuid())
+                net.is_active(), key=net.get_connkey())

             # Build a list of vnet bridges, so we know not to list them
             # in the physical interface list
@@ -173,7 +172,7 @@ class vmmNetworkList(vmmGObjectUI):

     def _find_physical_devices(self, vnet_bridges):
         vnet_taps = []
-        for vm in self.conn.vms.values():
+        for vm in self.conn.list_vms():
             for nic in vm.get_network_devices(refresh_if_nec=False):
                 if nic.target_dev and nic.target_dev not in vnet_taps:
                     vnet_taps.append(nic.target_dev)
@@ -184,8 +183,8 @@ class vmmNetworkList(vmmGObjectUI):
         brIdxLabel = None
         skip_ifaces = ["lo"]

-        for name in self.conn.list_net_device_paths():
-            br = self.conn.get_net_device(name)
+        for br in self.conn.list_netdevs():
+            name = br.name
             bridge_name = br.get_bridge()
             nettype = virtinst.VirtualNetworkInterface.TYPE_BRIDGE

@@ -334,7 +333,7 @@ class vmmNetworkList(vmmGObjectUI):
         # Make sure VirtualNetwork is running
         netobj = None
         if nettype == virtinst.VirtualNetworkInterface.TYPE_VIRTUAL:
-            for net in self.conn.nets.values():
+            for net in self.conn.list_nets():
                 if net.get_name() == devname:
                     netobj = net
                     break
@@ -515,9 +514,9 @@ class vmmNetworkList(vmmGObjectUI):
             self.widget("net-bridge-name"), show_bridge)

         portgroups = []
-        key = row[6]
-        if key and row[0] == virtinst.VirtualNetworkInterface.TYPE_VIRTUAL:
-            portgroups = self.conn.get_net(key).get_xmlobj().portgroups
+        connkey = row[6]
+        if connkey and row[0] == virtinst.VirtualNetworkInterface.TYPE_VIRTUAL:
+            portgroups = self.conn.get_net(connkey).get_xmlobj().portgroups

         uiutil.set_grid_row_visible(
             self.widget("net-portgroup"), bool(portgroups))
diff --git a/virtManager/network.py b/virtManager/network.py
index 4c55b0889..163a0c019 100644
--- a/virtManager/network.py
+++ b/virtManager/network.py
@@ -54,8 +54,6 @@ class vmmNetwork(vmmLibvirtObject):
     # Required class methods #
     ##########################

-    def get_name(self):
-        return self._backend.name()
     def _XMLDesc(self, flags):
         return self._backend.XMLDesc(flags)
     def _define(self, xml):
diff --git a/virtManager/nodedev.py b/virtManager/nodedev.py
index 643faffde..0d9531795 100644
--- a/virtManager/nodedev.py
+++ b/virtManager/nodedev.py
@@ -34,8 +34,6 @@ class vmmNodeDevice(vmmLibvirtObject):
     def _XMLDesc(self, flags):
         return self._backend.XMLDesc(flags)

-    def get_name(self):
-        return self._name
-
     def is_active(self):
         return True
diff --git a/virtManager/storagebrowse.py b/virtManager/storagebrowse.py
index 129f2dfe2..0071eaef6 100644
--- a/virtManager/storagebrowse.py
+++ b/virtManager/storagebrowse.py
@@ -182,8 +182,9 @@ class vmmStorageBrowser(vmmGObjectUI):
         if not self._first_run:
             self._first_run = True
             pool = self.conn.get_default_pool()
-            uiutil.set_row_selection(
-                self.widget("pool-list"), pool and pool.get_uuid() or None)
+            uiutil.set_row_selection(self.widget("pool-list"),
+                pool and pool.get_connkey() or None)

         # Manually trigger vol_selected, so buttons are in the correct state
         self.vol_selected()
         self.pool_selected()
@@ -222,8 +223,10 @@ class vmmStorageBrowser(vmmGObjectUI):
         row = uiutil.get_list_selection(self.widget("pool-list"), None)
         if not row:
             return
+
+        connkey = row[0]
         try:
-            return self.conn.get_pool(row[0])
+            return self.conn.get_pool(connkey)
         except KeyError:
             return None

@@ -239,18 +242,22 @@ class vmmStorageBrowser(vmmGObjectUI):
             return
         return pool.get_volume(row[0])

-    def refresh_storage_pool(self, src_ignore, uuid):
+    def refresh_storage_pool(self, src, connkey):
+        ignore = src
+
         pool_list = self.widget("pool-list")
-        host.refresh_pool_in_list(pool_list, self.conn, uuid)
+        host.refresh_pool_in_list(pool_list, self.conn, connkey)
         curpool = self.current_pool()
-        if curpool.get_uuid() != uuid:
+        if curpool.get_connkey() != connkey:
             return

         # Currently selected pool changed state: force a 'pool_selected' to
         # update vol list
         self.pool_selected(self.widget("pool-list").get_selection())

-    def repopulate_storage_pools(self, src_ignore=None, uuid_ignore=None):
+    def repopulate_storage_pools(self, src=None, connkey=None):
+        ignore = src
+        ignore = connkey
         pool_list = self.widget("pool-list")
         host.populate_storage_pools(pool_list, self.conn, self.current_pool())

@@ -327,7 +334,7 @@ class vmmStorageBrowser(vmmGObjectUI):
             return
         cp.refresh()

-        self.refresh_storage_pool(None, cp.get_uuid())
+        self.refresh_storage_pool(None, cp.get_connkey())

         name = createvol and createvol.vol.name or None
         vol_list = self.widget("vol-list")
diff --git a/virtManager/storagepool.py b/virtManager/storagepool.py
index 17a353d69..1a5bdabb4 100644
--- a/virtManager/storagepool.py
+++ b/virtManager/storagepool.py
@@ -38,8 +38,6 @@ class vmmStorageVolume(vmmLibvirtObject):
     # Required class methods #
     ##########################

-    def get_name(self):
-        return self.get_xmlobj().name
     def _XMLDesc(self, flags):
         try:
             return self._backend.XMLDesc(flags)
@@ -54,8 +52,10 @@ class vmmStorageVolume(vmmLibvirtObject):
     ###########

     def get_parent_pool(self):
-        pobj = self._backend.storagePoolLookupByVolume()
-        return self.conn.get_pool_by_name(pobj.name())
+        name = self._backend.storagePoolLookupByVolume().name()
+        for pool in self.conn.list_pools():
+            if pool.get_name() == name:
+                return pool

     def delete(self, force=True):
         ignore = force
@@ -115,8 +115,6 @@ class vmmStoragePool(vmmLibvirtObject):
     # Required class methods #
     ##########################

-    def get_name(self):
-        return self.get_xmlobj().name
     def _XMLDesc(self, flags):
         return self._backend.XMLDesc(flags)
     def _define(self, xml):
@@ -203,8 +201,8 @@ class vmmStoragePool(vmmLibvirtObject):
             self.update_volumes()
         return self._volumes

-    def get_volume(self, uuid):
-        return self._volumes[uuid]
+    def get_volume(self, key):
+        return self._volumes[key]

     def update_volumes(self, refresh=False):
         if not self.is_active():
diff --git a/virtManager/systray.py b/virtManager/systray.py
index 679f19b9f..03e018c3c 100644
--- a/virtManager/systray.py
+++ b/virtManager/systray.py
@@ -164,12 +164,12 @@ class vmmSystray(vmmGObject):

     # Helper functions
     def _get_vm_menu_item(self, vm):
-        uuid = vm.get_uuid()
+        connkey = vm.get_connkey()
         uri = vm.conn.get_uri()

         if uri in self.conn_vm_menuitems:
-            if uuid in self.conn_vm_menuitems[uri]:
-                return self.conn_vm_menuitems[uri][uuid]
+            if connkey in self.conn_vm_menuitems[uri]:
+                return self.conn_vm_menuitems[uri][connkey]
         return None

     def _set_vm_status_icon(self, vm, menu_item):
@@ -256,8 +256,8 @@ class vmmSystray(vmmGObject):
             vm_submenu.remove(c)

         vm_mappings = {}
-        for vm in conn.vms.values():
-            vm_mappings[vm.get_name()] = vm.get_uuid()
+        for vm in conn.list_vms():
+            vm_mappings[vm.get_name()] = vm.get_connkey()

         vm_names = vm_mappings.keys()
         vm_names.sort()
@@ -270,30 +270,30 @@ class vmmSystray(vmmGObject):

         for i in range(0, len(vm_names)):
             name = vm_names[i]
-            uuid = vm_mappings[name]
-            if uuid in self.conn_vm_menuitems[uri]:
-                vm_item = self.conn_vm_menuitems[uri][uuid]
+            connkey = vm_mappings[name]
+            if connkey in self.conn_vm_menuitems[uri]:
+                vm_item = self.conn_vm_menuitems[uri][connkey]
                 vm_submenu.insert(vm_item, i)

-    def vm_added(self, conn, uuid):
+    def vm_added(self, conn, connkey):
         uri = conn.get_uri()
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)
         if not vm:
             return
         vm.connect("status-changed", self.vm_state_changed)

         vm_mappings = self.conn_vm_menuitems[uri]
-        if uuid in vm_mappings:
+        if connkey in vm_mappings:
             return

         # Build VM list entry
         menu_item = Gtk.ImageMenuItem.new_with_label(vm.get_name())
         menu_item.set_use_underline(False)
-        vm_mappings[uuid] = menu_item
+        vm_mappings[connkey] = menu_item
         vm_action_menu = vmmenu.VMActionMenu(self, lambda: vm)
         menu_item.set_submenu(vm_action_menu)
-        self.vm_action_dict[uuid] = vm_action_menu
+        self.vm_action_dict[connkey] = vm_action_menu

         # Add VM to menu list
         self.populate_vm_list(conn)
@@ -302,26 +302,28 @@ class vmmSystray(vmmGObject):
         self.vm_state_changed(vm)
         menu_item.show()

-    def vm_removed(self, conn, uuid):
+    def vm_removed(self, conn, connkey):
         uri = conn.get_uri()
         vm_mappings = self.conn_vm_menuitems[uri]
         if not vm_mappings:
             return

-        if uuid in vm_mappings:
-            conn_item = self.conn_menuitems[uri]
-            vm_menu_item = vm_mappings[uuid]
-            vm_menu = conn_item.get_submenu()
-            vm_menu.remove(vm_menu_item)
-            vm_menu_item.destroy()
-            del(vm_mappings[uuid])
+        if connkey not in vm_mappings:
diff --git a/virtManager/systray.py b/virtManager/systray.py
index 679f19b9f..03e018c3c 100644
--- a/virtManager/systray.py
+++ b/virtManager/systray.py
@@ -164,12 +164,12 @@ class vmmSystray(vmmGObject):

     # Helper functions
     def _get_vm_menu_item(self, vm):
-        uuid = vm.get_uuid()
+        connkey = vm.get_connkey()
         uri = vm.conn.get_uri()

         if uri in self.conn_vm_menuitems:
-            if uuid in self.conn_vm_menuitems[uri]:
-                return self.conn_vm_menuitems[uri][uuid]
+            if connkey in self.conn_vm_menuitems[uri]:
+                return self.conn_vm_menuitems[uri][connkey]
         return None

     def _set_vm_status_icon(self, vm, menu_item):
@@ -256,8 +256,8 @@
             vm_submenu.remove(c)

         vm_mappings = {}
-        for vm in conn.vms.values():
-            vm_mappings[vm.get_name()] = vm.get_uuid()
+        for vm in conn.list_vms():
+            vm_mappings[vm.get_name()] = vm.get_connkey()

         vm_names = vm_mappings.keys()
         vm_names.sort()
@@ -270,30 +270,30 @@

         for i in range(0, len(vm_names)):
             name = vm_names[i]
-            uuid = vm_mappings[name]
-            if uuid in self.conn_vm_menuitems[uri]:
-                vm_item = self.conn_vm_menuitems[uri][uuid]
+            connkey = vm_mappings[name]
+            if connkey in self.conn_vm_menuitems[uri]:
+                vm_item = self.conn_vm_menuitems[uri][connkey]
                 vm_submenu.insert(vm_item, i)

-    def vm_added(self, conn, uuid):
+    def vm_added(self, conn, connkey):
         uri = conn.get_uri()
-        vm = conn.get_vm(uuid)
+        vm = conn.get_vm(connkey)
         if not vm:
             return
         vm.connect("status-changed", self.vm_state_changed)

         vm_mappings = self.conn_vm_menuitems[uri]
-        if uuid in vm_mappings:
+        if connkey in vm_mappings:
             return

         # Build VM list entry
         menu_item = Gtk.ImageMenuItem.new_with_label(vm.get_name())
         menu_item.set_use_underline(False)
-        vm_mappings[uuid] = menu_item
+        vm_mappings[connkey] = menu_item
         vm_action_menu = vmmenu.VMActionMenu(self, lambda: vm)
         menu_item.set_submenu(vm_action_menu)
-        self.vm_action_dict[uuid] = vm_action_menu
+        self.vm_action_dict[connkey] = vm_action_menu

         # Add VM to menu list
         self.populate_vm_list(conn)
@@ -302,26 +302,28 @@
             self.vm_state_changed(vm)
             menu_item.show()

-    def vm_removed(self, conn, uuid):
+    def vm_removed(self, conn, connkey):
         uri = conn.get_uri()
         vm_mappings = self.conn_vm_menuitems[uri]
         if not vm_mappings:
             return

-        if uuid in vm_mappings:
-            conn_item = self.conn_menuitems[uri]
-            vm_menu_item = vm_mappings[uuid]
-            vm_menu = conn_item.get_submenu()
-            vm_menu.remove(vm_menu_item)
-            vm_menu_item.destroy()
-            del(vm_mappings[uuid])
+        if connkey not in vm_mappings:
+            return

-            if len(vm_menu.get_children()) == 0:
-                placeholder = Gtk.MenuItem.new_with_label(
-                    _("No virtual machines"))
-                placeholder.show()
-                placeholder.set_sensitive(False)
-                vm_menu.add(placeholder)
+        conn_item = self.conn_menuitems[uri]
+        vm_menu_item = vm_mappings[connkey]
+        vm_menu = conn_item.get_submenu()
+        vm_menu.remove(vm_menu_item)
+        vm_menu_item.destroy()
+        del(vm_mappings[connkey])
+
+        if len(vm_menu.get_children()) == 0:
+            placeholder = Gtk.MenuItem.new_with_label(
+                _("No virtual machines"))
+            placeholder.show()
+            placeholder.set_sensitive(False)
+            vm_menu.add(placeholder)

     def vm_state_changed(self, vm, ignore=None, ignore2=None):
         menu_item = self._get_vm_menu_item(vm)
@@ -331,7 +333,7 @@
         self._set_vm_status_icon(vm, menu_item)

         # Update action widget states
-        menu = self.vm_action_dict[vm.get_uuid()]
+        menu = self.vm_action_dict[vm.get_connkey()]
         menu.update_widget_states(vm)

     def exit_app(self, ignore):
diff --git a/virtManager/vmmenu.py b/virtManager/vmmenu.py
index 66671b9ef..9ebaaa9b8 100644
--- a/virtManager/vmmenu.py
+++ b/virtManager/vmmenu.py
@@ -60,7 +60,7 @@ class _VMMenu(Gtk.Menu):
         if not vm:
             return
         self._parent.emit("action-%s-domain" % src.vmm_widget_name,
-                          vm.conn.get_uri(), vm.get_uuid())
+                          vm.conn.get_uri(), vm.get_connkey())

     def _init_state(self):
         raise NotImplementedError()
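After these UI changes, handlers such as vm_added()/vm_removed() receive the connkey (now the domain name) instead of a UUID string and resolve it back to a vmmDomain via conn.get_vm(). A minimal sketch of a consumer of that contract; the class and its dict are invented for illustration, while conn.get_vm() and vm.get_connkey() are the calls the patch itself uses:

# Hypothetical consumer of the connkey-based signals; only get_vm()
# and get_connkey() come from the patch, the rest is illustrative.
class ExampleVMWatcher(object):
    def __init__(self, conn):
        self._seen = {}  # connkey -> display name, like conn_vm_menuitems
        conn.connect("vm-added", self._vm_added)
        conn.connect("vm-removed", self._vm_removed)

    def _vm_added(self, conn, connkey):
        vm = conn.get_vm(connkey)
        if vm and connkey not in self._seen:
            self._seen[connkey] = vm.get_name()

    def _vm_removed(self, conn, connkey):
        self._seen.pop(connkey, None)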
diff --git a/virtinst/pollhelpers.py b/virtinst/pollhelpers.py
index a79715fe8..a9b152769 100644
--- a/virtinst/pollhelpers.py
+++ b/virtinst/pollhelpers.py
@@ -19,14 +19,12 @@

 import logging

-from virtinst import util
-

 # Debugging helper to force old style polling
 _force_old_poll = False


-def _new_poll_helper(origmap, typename, listfunc, keyfunc, buildfunc):
+def _new_poll_helper(origmap, typename, listfunc, buildfunc):
     """
     Helper for new style listAll* APIs
     """
@@ -40,27 +38,26 @@ def _new_poll_helper(origmap, typename, listfunc, buildfunc):
         logging.debug("Unable to list all %ss: %s", typename, e)

     for obj in objs:
-        key = getattr(obj, keyfunc)()
+        connkey = obj.name()

-        if key not in origmap:
+        if connkey not in origmap:
             # Object is brand new this period
-            current[key] = buildfunc(obj, key)
-            new[key] = current[key]
+            current[connkey] = buildfunc(obj, connkey)
+            new[connkey] = current[connkey]
         else:
             # Previously known object
-            current[key] = origmap[key]
-            del origmap[key]
+            current[connkey] = origmap[connkey]
+            del(origmap[connkey])

     return (origmap, new, current)


 def _old_poll_helper(origmap, typename,
                      active_list, inactive_list,
-                     lookup_func, build_func,
-                     key_is_uuid=False):
+                     lookup_func, build_func):
     """
     Helper routine for old style split API libvirt polling.

-    @origmap: Pre-existing mapping of objects, with key->obj mapping.
+    @origmap: Pre-existing mapping of objects, with connkey->obj mapping.
         objects must have an is_active and set_active API
     @typename: string describing type of objects we are polling for use
         in debug messages.
@@ -68,9 +65,7 @@ def _old_poll_helper(origmap, typename,
     @inactive_list: Function that returns the list of inactive objects
     @lookup_func: Function to get an object handle for the passed name
     @build_func: Function that builds a new object class. It is passed
-        args of (raw libvirt object, key (usually UUID))
-    @key_is_uuid: If True, we use the object UUID as the returned dictionary
-        keys
+        args of (raw libvirt object, connkey)
     """
     current = {}
     new = {}
@@ -88,26 +83,24 @@ def _old_poll_helper(origmap, typename,

     def check_obj(name):
         obj = None
-        key = name
-        if key not in origmap or key_is_uuid:
-            try:
-                obj = lookup_func(key)
-            except Exception, e:
-                logging.debug("Could not fetch %s '%s': %s",
-                              typename, key, e)
-                return
+        connkey = name

-        if key_is_uuid:
-            key = obj.UUIDString()
+        if connkey not in origmap:
+            try:
+                obj = lookup_func(name)
+            except Exception, e:
+                logging.debug("Could not fetch %s '%s': %s",
+                              typename, connkey, e)
+                return

-        if key not in origmap:
             # Object is brand new this period
-            current[key] = build_func(obj, key)
-            new[key] = current[key]
+            current[connkey] = build_func(obj, connkey)
+            new[connkey] = current[connkey]
         else:
             # Previously known object
-            current[key] = origmap[key]
-            del origmap[key]
+            current[connkey] = origmap[connkey]
+            del(origmap[connkey])

     for name in newActiveNames + newInactiveNames:
         try:
@@ -124,8 +117,7 @@ def fetch_nets(backend, origmap, build_func):
     if backend.check_support(
         backend.SUPPORT_CONN_LISTALLNETWORKS) and not _force_old_poll:
         return _new_poll_helper(origmap, name,
-                                backend.listAllNetworks,
-                                "UUIDString", build_func)
+                                backend.listAllNetworks, build_func)
     else:
         active_list = backend.listNetworks
         inactive_list = backend.listDefinedNetworks
@@ -133,8 +125,7 @@

         return _old_poll_helper(origmap, name,
                                 active_list, inactive_list,
-                                lookup_func, build_func,
-                                key_is_uuid=True)
+                                lookup_func, build_func)


 def fetch_pools(backend, origmap, build_func):
@@ -143,8 +134,7 @@
     if backend.check_support(
         backend.SUPPORT_CONN_LISTALLSTORAGEPOOLS) and not _force_old_poll:
         return _new_poll_helper(origmap, name,
-                                backend.listAllStoragePools,
-                                "UUIDString", build_func)
+                                backend.listAllStoragePools, build_func)
     else:
         active_list = backend.listStoragePools
         inactive_list = backend.listDefinedStoragePools
@@ -152,8 +142,7 @@

         return _old_poll_helper(origmap, name,
                                 active_list, inactive_list,
-                                lookup_func, build_func,
-                                key_is_uuid=True)
+                                lookup_func, build_func)


 def fetch_volumes(backend, pool, origmap, build_func):
@@ -162,8 +151,7 @@
     if backend.check_support(
         backend.SUPPORT_POOL_LISTALLVOLUMES, pool) and not _force_old_poll:
         return _new_poll_helper(origmap, name,
-                                pool.listAllVolumes,
-                                "name", build_func)
+                                pool.listAllVolumes, build_func)
     else:
         active_list = pool.listVolumes
         inactive_list = lambda: []
@@ -179,8 +167,7 @@ def fetch_interfaces(backend, origmap, build_func):
     if backend.check_support(
         backend.SUPPORT_CONN_LISTALLINTERFACES) and not _force_old_poll:
         return _new_poll_helper(origmap, name,
-                                backend.listAllInterfaces,
-                                "name", build_func)
+                                backend.listAllInterfaces, build_func)
     else:
         active_list = backend.listInterfaces
         inactive_list = backend.listDefinedInterfaces
@@ -196,8 +183,7 @@ def fetch_nodedevs(backend, origmap, build_func):
     if backend.check_support(
         backend.SUPPORT_CONN_LISTALLDEVICES) and not _force_old_poll:
         return _new_poll_helper(origmap, name,
-                                backend.listAllDevices,
-                                "name", build_func)
+                                backend.listAllDevices, build_func)
     else:
         active_list = lambda: backend.listDevices(None, 0)
         inactive_list = lambda: []
@@ -220,8 +206,7 @@ def _old_fetch_vms(backend, origmap, build_func):
     new = {}

     # Build list of previous vms with proper id/name mappings
-    for uuid in origmap:
-        vm = origmap[uuid]
+    for vm in origmap.values():
         if vm.is_active():
             oldActiveIDs[vm.get_id()] = vm
         else:
@@ -238,20 +223,20 @@
             logging.exception("Unable to list inactive domains: %s", e)

     def add_vm(vm):
-        uuid = vm.get_uuid()
+        connkey = vm.get_name()

-        current[uuid] = vm
-        del(origmap[uuid])
+        current[connkey] = vm
+        del(origmap[connkey])

-    def check_new(rawvm, uuid):
-        if uuid in origmap:
-            vm = origmap[uuid]
-            del(origmap[uuid])
+    def check_new(rawvm, connkey):
+        if connkey in origmap:
+            vm = origmap[connkey]
+            del(origmap[connkey])
         else:
-            vm = build_func(rawvm, uuid)
-            new[uuid] = vm
+            vm = build_func(rawvm, connkey)
+            new[connkey] = vm

-        current[uuid] = vm
+        current[connkey] = vm

     for _id in newActiveIDs:
         if _id in oldActiveIDs:
@@ -262,9 +247,9 @@
             # Check if domain is brand new, or old one that changed state
             try:
                 vm = backend.lookupByID(_id)
-                uuid = util.uuidstr(vm.UUID())
+                connkey = vm.name()

-                check_new(vm, uuid)
+                check_new(vm, connkey)
             except:
                 logging.exception("Couldn't fetch domain id '%s'", _id)

@@ -278,9 +263,9 @@
             # Check if domain is brand new, or old one that changed state
             try:
                 vm = backend.lookupByName(name)
-                uuid = util.uuidstr(vm.UUID())
+                connkey = name

-                check_new(vm, uuid)
+                check_new(vm, connkey)
             except:
                 logging.exception("Couldn't fetch domain '%s'", name)

@@ -290,9 +275,8 @@
 def fetch_vms(backend, origmap, build_func):
     name = "domain"
     if backend.check_support(
-        backend.SUPPORT_CONN_LISTALLDOMAINS):
+            backend.SUPPORT_CONN_LISTALLDOMAINS):
         return _new_poll_helper(origmap, name,
-                                backend.listAllDomains,
-                                "UUIDString", build_func)
+                                backend.listAllDomains, build_func)
     else:
         return _old_fetch_vms(backend, origmap, build_func)
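Both poll helpers return the same (origmap, new, current) triple: whatever is left in origmap was not seen this pass and is treated as gone. A self-contained restatement of that bookkeeping with libvirt factored out; all names here are illustrative, not from the patch:

# Toy version of the poll bookkeeping, keyed by connkey (object name).
# 'seen_names' stands in for whatever listfunc/lookup_func returned.
def poll(origmap, seen_names, build_func):
    current, new = {}, {}
    for connkey in seen_names:
        if connkey not in origmap:
            # Object is brand new this period
            current[connkey] = build_func(connkey)
            new[connkey] = current[connkey]
        else:
            # Previously known object
            current[connkey] = origmap.pop(connkey)
    return (origmap, new, current)  # leftovers in origmap are gone

gone, new, current = poll({"a": 1, "b": 2}, ["b", "c"], lambda k: k.upper())
# gone == {"a": 1}, new == {"c": "C"}, current == {"b": 2, "c": "C"}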
diff --git a/virtinst/util.py b/virtinst/util.py
index 0dee05523..19a9ed520 100644
--- a/virtinst/util.py
+++ b/virtinst/util.py
@@ -434,20 +434,6 @@ def local_libvirt_version():
     return getattr(libvirt, key)


-def uuidstr(rawuuid):
-    hx = ['0', '1', '2', '3', '4', '5', '6', '7',
-          '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
-    uuid = []
-    for i in range(16):
-        uuid.append(hx[((ord(rawuuid[i]) >> 4) & 0xf)])
-        uuid.append(hx[(ord(rawuuid[i]) & 0xf)])
-        if i == 3 or i == 5 or i == 7 or i == 9:
-            uuid.append('-')
-    return "".join(uuid)
-
-
-
 def get_system_scratchdir(hvtype):
     if "VIRTINST_TEST_SUITE" in os.environ:
         return os.getcwd()
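The deleted uuidstr() open-coded a raw-bytes-to-hex UUID conversion that the stdlib already provides; with domains now tracked by name, its last caller in pollhelpers.py is gone. For reference, an equivalent one-liner, assuming rawuuid is the 16-byte value libvirt's virDomainGetUUID() returns:

import uuid

# Stdlib equivalent of the removed helper, shown only for reference;
# str(uuid.UUID(...)) yields the same 8-4-4-4-12 lowercase-hex form.
def uuidstr(rawuuid):
    return str(uuid.UUID(bytes=rawuuid))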