diff --git a/include/RankScheduler.h b/include/RankScheduler.h index a8798eaddd..2682e30a43 100644 --- a/include/RankScheduler.h +++ b/include/RankScheduler.h @@ -33,12 +33,13 @@ class RankScheduler : public Scheduler { public: - RankScheduler():Scheduler(),rp_host(0),rp_ds(0),rp_vm(0){}; + RankScheduler():Scheduler(),rp_host(0),rp_ds(0),rp_nics(0), rp_vm(0){}; ~RankScheduler() { delete rp_host; delete rp_ds; + delete rp_nics; delete rp_vm; }; @@ -56,11 +57,16 @@ public: rp_vm = new UserPriorityPolicy(vmpool, 1.0); add_vm_policy(rp_vm); + + rp_nics = new RankNetworkPolicy(vnetpool, conf.get_nics_policy(), 1.0); + + add_nic_policy(rp_nics); }; private: RankPolicy * rp_host; RankPolicy * rp_ds; + RankPolicy * rp_nics; UserPriorityPolicy * rp_vm; }; \ No newline at end of file diff --git a/include/RequestManagerVirtualMachine.h b/include/RequestManagerVirtualMachine.h index 48f17183fc..8ce375420f 100644 --- a/include/RequestManagerVirtualMachine.h +++ b/include/RequestManagerVirtualMachine.h @@ -104,6 +104,8 @@ protected: RequestAttributes& att); VirtualMachine * get_vm(int id, RequestAttributes& att); + + VirtualMachine * get_vm_ro(int id, RequestAttributes& att); }; /* ------------------------------------------------------------------------- */ @@ -133,7 +135,7 @@ public: VirtualMachineDeploy(): RequestManagerVirtualMachine("one.vm.deploy", "Deploys a virtual machine", - "A:siibi") + "A:siibis") { auth_op = Nebula::instance().get_vm_auth_op(History::DEPLOY_ACTION); }; diff --git a/include/VirtualMachine.h b/include/VirtualMachine.h index d72b30274c..9f382bbbbe 100644 --- a/include/VirtualMachine.h +++ b/include/VirtualMachine.h @@ -1159,6 +1159,8 @@ public: * @param files space separated list of paths to be included in the CBD * @param disk_id CONTEXT/DISK_ID attribute value * @param password Password to encrypt the token, if it is set + * @param only_auto boolean to generate context only for vnets + * with NETWORK_MODE = auto * @return -1 in case of error, 0 if the 
VM has no context, 1 on success */ int generate_context(string &files, int &disk_id, const string& password); @@ -1616,6 +1618,14 @@ public: disks.clear_cloning_image_id(image_id, source); } + /** + * Get network leases with NETWORK_MODE = auto for this Virtual Machine + * @param tmpl with the scheduling results for the auto NICs + * @param estr description if any + * @return 0 if success + */ + int get_auto_network_leases(VirtualMachineTemplate * tmpl, string &estr); + private: // ------------------------------------------------------------------------- @@ -1948,9 +1958,12 @@ private: * netowrking updates. * @param context attribute of the VM * @param error string if any + * @param only_auto boolean to generate context only for vnets + * with NETWORK_MODE = auto * @return 0 on success */ - int generate_network_context(VectorAttribute * context, string& error); + int generate_network_context(VectorAttribute * context, string& error, + bool only_auto); /** * Deletes the NETWORK related CONTEXT section for the given nic, i.e. 
@@ -1979,9 +1992,11 @@ private: * Parse the "CONTEXT" attribute of the template by substituting * $VARIABLE, $VARIABLE[ATTR] and $VARIABLE[ATTR, ATTR = VALUE] * @param error_str Returns the error reason, if any + * @param all_nics whether to parse the network context for all NICs or + * only for those with NETWORK_MODE = auto * @return 0 on success */ - int parse_context(string& error_str); + int parse_context(string& error_str, bool all_nics); /** * Parses the current contents of the context vector attribute, without @@ -1998,7 +2013,7 @@ // Management helpers: NIC, DISK and VMGROUP // ------------------------------------------------------------------------- /** - * Get all network leases for this Virtual Machine + * Get network leases (no auto NICs, NETWORK_MODE != auto) for this VM * @return 0 if success */ int get_network_leases(string &error_str); diff --git a/include/VirtualMachineNic.h b/include/VirtualMachineNic.h index 7a4f16110f..aa4717e38c 100644 --- a/include/VirtualMachineNic.h +++ b/include/VirtualMachineNic.h @@ -256,6 +256,9 @@ public: VectorAttribute * nic_default, std::vector& sgs, std::string& estr); + int get_auto_network_leases(int vm_id, int uid, VectorAttribute * nic_default, + vector& sgs, std::string& error_str); + /** * Release all the network leases and SG associated to the set * @param vmid of the VM diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb index 068835b16c..ccd44cf09c 100644 --- a/src/cli/one_helper.rb +++ b/src/cli/one_helper.rb @@ -246,7 +246,8 @@ EOT :description => "Networks to attach. To use a network owned by"<< " other user use user[network]. Additional"<< " attributes are supported like with the --disk"<< - " option.", + " option. 
Also you can use auto if you want that" << + " OpenNebula select automatically the network", :format => Array }, { @@ -1241,15 +1242,19 @@ EOT user, object=*res template<<"#{section.upcase}=[\n" - template<<" #{name.upcase}_UNAME=\"#{user}\",\n" if user - extra_attributes.each do |extra_attribute| - key, value = extra_attribute.split("=") - template<<" #{key.upcase}=\"#{value}\",\n" - end - if object.match(/^\d+$/) - template<<" #{name.upcase}_ID=#{object}\n" + if object.casecmp? "auto" + template<<" NETWORK_MODE=\"#{object}\"\n" else - template<<" #{name.upcase}=\"#{object}\"\n" + template<<" #{name.upcase}_UNAME=\"#{user}\",\n" if user + extra_attributes.each do |extra_attribute| + key, value = extra_attribute.split("=") + template<<" #{key.upcase}=\"#{value}\",\n" + end + if object.match(/^\d+$/) + template<<" #{name.upcase}_ID=#{object}\n" + else + template<<" #{name.upcase}=\"#{object}\"\n" + end end template<<"]\n" end if objects diff --git a/src/cli/onevm b/src/cli/onevm index 4971565f00..c295f8efba 100755 --- a/src/cli/onevm +++ b/src/cli/onevm @@ -483,15 +483,21 @@ cmd=CommandParser::CmdParser.new(ARGV) do EOT command :deploy, deploy_desc, [:range,:vmid_list], :hostid, [:datastoreid,nil], - :options=>[ENFORCE] do + :options=>[ENFORCE, OneVMHelper::FILE] do host_id = args[1] verbose = "deploying in host #{host_id}" enforce = options[:enforce].nil? ? false : options[:enforce] ds_id = args[2].nil? ? 
-1 : args[2] + extra_template = nil + + if options[:file] + extra_template = File.read(options[:file]) + end + helper.perform_actions(args[0],options,verbose) do |vm| - vm.deploy(host_id, enforce, ds_id) + vm.deploy(host_id, enforce, ds_id, extra_template) end end diff --git a/src/oca/java/src/org/opennebula/client/vm/VirtualMachine.java b/src/oca/java/src/org/opennebula/client/vm/VirtualMachine.java index cb94dc2203..0ad4a3b9d1 100644 --- a/src/oca/java/src/org/opennebula/client/vm/VirtualMachine.java +++ b/src/oca/java/src/org/opennebula/client/vm/VirtualMachine.java @@ -645,9 +645,9 @@ public class VirtualMachine extends PoolElement{ * default, set it to -1 * @return If an error occurs the error message contains the reason. */ - public OneResponse deploy(int hostId, boolean enforce, int dsId) + public OneResponse deploy(int hostId, boolean enforce, int dsId, String extra_template) { - return client.call(DEPLOY, id, hostId, enforce, dsId); + return client.call(DEPLOY, id, hostId, enforce, dsId, extra_template); } /** @@ -659,7 +659,7 @@ public class VirtualMachine extends PoolElement{ */ public OneResponse deploy(int hostId) { - return deploy(hostId, false, -1); + return deploy(hostId, false, -1, ""); } /** diff --git a/src/oca/ruby/opennebula/virtual_machine.rb b/src/oca/ruby/opennebula/virtual_machine.rb index a1749d753b..dc2991c886 100644 --- a/src/oca/ruby/opennebula/virtual_machine.rb +++ b/src/oca/ruby/opennebula/virtual_machine.rb @@ -341,9 +341,10 @@ module OpenNebula # # @return [nil, OpenNebula::Error] nil in case of success, Error # otherwise - def deploy(host_id, enforce=false, ds_id=-1) + def deploy(host_id, enforce=false, ds_id=-1, extra_template="") enforce ||= false ds_id ||= -1 + extra_template ||= "" self.info @@ -351,7 +352,8 @@ module OpenNebula @pe_id, host_id.to_i, enforce, - ds_id.to_i) + ds_id.to_i, + extra_template) end # Shutdowns an already deployed VM diff --git a/src/rm/RequestManagerVirtualMachine.cc 
b/src/rm/RequestManagerVirtualMachine.cc index b397b46861..3b1673b2dc 100644 --- a/src/rm/RequestManagerVirtualMachine.cc +++ b/src/rm/RequestManagerVirtualMachine.cc @@ -420,7 +420,24 @@ VirtualMachine * RequestManagerVirtualMachine::get_vm(int id, { VirtualMachine * vm; - vm = static_cast(pool->get(id)); + vm = static_cast(pool)->get(id); + + if ( vm == 0 ) + { + att.resp_id = id; + failure_response(NO_EXISTS, att); + return 0; + } + + return vm; +} + +VirtualMachine * RequestManagerVirtualMachine::get_vm_ro(int id, + RequestAttributes& att) +{ + VirtualMachine * vm; + + vm = static_cast(pool)->get_ro(id); if ( vm == 0 ) { @@ -735,6 +752,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, DatastorePool * dspool = nd.get_dspool(); VirtualMachine * vm; + VirtualMachineTemplate tmpl; string hostname; string vmm_mad; @@ -745,13 +763,14 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, PoolObjectAuth * auth_ds_perms; string tm_mad; + string error_str; bool auth = false; + bool check_nic_auto = false; // ------------------------------------------------------------------------ // Get request parameters and information about the target host // ------------------------------------------------------------------------ - int id = xmlrpc_c::value_int(paramList.getInt(1)); int hid = xmlrpc_c::value_int(paramList.getInt(2)); bool enforce = false; @@ -767,6 +786,21 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, ds_id = xmlrpc_c::value_int(paramList.getInt(4)); } + if ( paramList.size() > 5 ) // Template with network scheduling results + { + std::string str_tmpl = xmlrpc_c::value_string(paramList.getString(5)); + + check_nic_auto = !str_tmpl.empty(); + + int rc = tmpl.parse_str_or_xml(str_tmpl, att.resp_msg); + + if ( rc != 0 ) + { + failure_response(INTERNAL, att); + return; + } + } + if (get_host_information(hid, hostname, vmm_mad, @@ -778,11 +812,11 @@ void 
VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, return; } + // ------------------------------------------------------------------------ // Get information about the system DS to use (tm_mad & permissions) // ------------------------------------------------------------------------ - - if ((vm = get_vm(id, att)) == 0) + if ((vm = get_vm_ro(id, att)) == 0) { return; } @@ -793,8 +827,13 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, vm->get_action() == History::UNDEPLOY_HARD_ACTION)) { ds_id = vm->get_ds_id(); + + check_nic_auto = false; } + int uid = vm->get_uid(); + int gid = vm->get_gid(); + vm->unlock(); if (is_public_cloud) // Set ds_id to -1 and tm_mad empty(). This is used by @@ -867,8 +906,37 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, // ------------------------------------------------------------------------ // Authorize request // ------------------------------------------------------------------------ + if ( check_nic_auto ) //Authorize network schedule and quotas + { + RequestAttributes att_quota(uid, gid, att); - auth = vm_authorization(id, 0, 0, att, &host_perms, auth_ds_perms, 0, auth_op); + if (!att.is_admin()) + { + string aname; + + if (tmpl.check_restricted(aname)) + { + att.resp_msg = "NIC includes a restricted attribute " + aname; + + failure_response(AUTHORIZATION, att); + return; + } + } + + if (!quota_authorization(&tmpl, Quotas::NETWORK, att_quota, att.resp_msg)) + { + failure_response(AUTHORIZATION, att); + return; + } + + auth = vm_authorization(id, 0, &tmpl, att, &host_perms, auth_ds_perms,0, + auth_op); + } + else + { + auth = vm_authorization(id, 0, 0, att, &host_perms, auth_ds_perms, 0, + auth_op); + } if (auth == false) { @@ -907,6 +975,17 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, return; } + if ( check_nic_auto && vm->get_auto_network_leases(&tmpl, error_str) != 0 ) + { + att.resp_msg = 
error_str; + failure_response(ACTION, att); + + vm->unlock(); + return; + } + + static_cast(pool)->update(vm); + // ------------------------------------------------------------------------ // Add deployment dependent attributes to VM // - volatile disk (selected system DS driver) diff --git a/src/scheduler/include/RankPolicy.h b/src/scheduler/include/RankPolicy.h index ca06672c9b..7853bded61 100644 --- a/src/scheduler/include/RankPolicy.h +++ b/src/scheduler/include/RankPolicy.h @@ -118,14 +118,14 @@ private: const vector get_match_resources(ObjectXML *obj) { - VirtualMachineXML * vm = dynamic_cast(obj); + VirtualMachineXML * vm = static_cast(obj); return vm->get_match_hosts(); }; const string& get_rank(ObjectXML *obj) { - VirtualMachineXML * vm = dynamic_cast(obj); + VirtualMachineXML * vm = static_cast(obj); if (vm->get_rank().empty()) { @@ -150,14 +150,14 @@ private: const vector get_match_resources(ObjectXML *obj) { - VirtualMachineXML * vm = dynamic_cast(obj); + VirtualMachineXML * vm = static_cast(obj); return vm->get_match_datastores(); }; const string& get_rank(ObjectXML *obj) { - VirtualMachineXML * vm = dynamic_cast(obj); + VirtualMachineXML * vm = static_cast(obj); if (vm->get_ds_rank().empty()) { @@ -168,4 +168,37 @@ private: }; }; +class RankNetworkPolicy : public RankPolicy +{ +public: + + RankNetworkPolicy(VirtualNetworkPoolXML * pool, const string& dr,float w=1.0): + RankPolicy(pool, dr, w){}; + + ~RankNetworkPolicy(){}; + +private: + + const vector get_match_resources(ObjectXML *obj) + { + VirtualMachineNicXML * nic = static_cast(obj); + + return nic->get_match_networks(); + }; + + const string& get_rank(ObjectXML *obj) + { + VirtualMachineNicXML * nic = static_cast(obj); + + const std::string& nr = nic->get_rank(); + + if (nr.empty()) + { + return default_rank; + } + + return nr; + }; +}; + #endif /*RANK_POLICY_H_*/ diff --git a/src/scheduler/include/Scheduler.h b/src/scheduler/include/Scheduler.h index 59914aa1c1..f40b27c589 100644 --- 
a/src/scheduler/include/Scheduler.h +++ b/src/scheduler/include/Scheduler.h @@ -24,6 +24,7 @@ #include "ClusterPoolXML.h" #include "DatastorePoolXML.h" #include "VirtualMachinePoolXML.h" +#include "VirtualNetworkPoolXML.h" #include "SchedulerPolicy.h" #include "ActionManager.h" #include "AclXML.h" @@ -74,6 +75,7 @@ protected: img_dspool(0), vmpool(0), vm_roles_pool(0), + vnetpool(0), vmgpool(0), vmapool(0), timer(0), @@ -93,6 +95,7 @@ protected: delete vmpool; delete vm_roles_pool; + delete vnetpool; delete vmapool; delete dspool; @@ -119,6 +122,8 @@ protected: VirtualMachinePoolXML * vmpool; VirtualMachineRolePoolXML * vm_roles_pool; + VirtualNetworkPoolXML * vnetpool; + VMGroupPoolXML * vmgpool; VirtualMachineActionsPoolXML* vmapool; @@ -142,6 +147,11 @@ protected: vm_policies.push_back(policy); } + void add_nic_policy(SchedulerPolicy *policy) + { + nic_policies.push_back(policy); + } + // --------------------------------------------------------------- // Scheduler main methods // --------------------------------------------------------------- @@ -182,6 +192,7 @@ private: vector host_policies; vector ds_policies; vector vm_policies; + vector nic_policies; // --------------------------------------------------------------- // Configuration attributes diff --git a/src/scheduler/include/SchedulerTemplate.h b/src/scheduler/include/SchedulerTemplate.h index eb28211b40..11427df135 100644 --- a/src/scheduler/include/SchedulerTemplate.h +++ b/src/scheduler/include/SchedulerTemplate.h @@ -34,6 +34,8 @@ public: string get_ds_policy() const; + string get_nics_policy() const; + private: /** * Name for the configuration file, oned.conf diff --git a/src/scheduler/include/VirtualMachinePoolXML.h b/src/scheduler/include/VirtualMachinePoolXML.h index f6090c73a0..c7a80d7848 100644 --- a/src/scheduler/include/VirtualMachinePoolXML.h +++ b/src/scheduler/include/VirtualMachinePoolXML.h @@ -59,8 +59,9 @@ public: * @param vid the VM id * @param hid the id of the target host * @param 
resched the machine is going to be rescheduled + * @param extra template with result nics */ - int dispatch(int vid, int hid, int dsid, bool resched) const; + int dispatch(int vid, int hid, int dsid, bool resched, const string& extra_template) const; /** * Update the VM template diff --git a/src/scheduler/include/VirtualMachineXML.h b/src/scheduler/include/VirtualMachineXML.h index 729934ce4c..739924a3f1 100644 --- a/src/scheduler/include/VirtualMachineXML.h +++ b/src/scheduler/include/VirtualMachineXML.h @@ -31,6 +31,80 @@ class ImageDatastorePoolXML; using namespace std; +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +class VirtualMachineNicXML : public ObjectXML +{ +public: + + /** + * Returns a vector of matched datastores + */ + const vector& get_match_networks() + { + return match_networks.get_resources(); + } + + /** + * Adds a matching network + * @param oid of the network + */ + void add_match_network(int oid) + { + match_networks.add_resource(oid); + } + + /** + * Sort the matched networks for the VM + */ + void sort_match_networks() + { + match_networks.sort_resources(); + } + + /** + * Removes the matched networks + */ + void clear_match_networks() + { + match_networks.clear(); + } + + //-------------------------------------------------------------------------- + // Rank & requirements set & get + //-------------------------------------------------------------------------- + const string& get_rank() + { + return rank; + }; + + void set_rank(const string& r) + { + rank = r; + } + + const string& get_requirements() + { + return requirements; + }; + + void set_requirements(const string& r) + { + requirements = r; + } + +private: + ResourceMatch match_networks; + + string rank; + + string requirements; +}; + +/* -------------------------------------------------------------------------- */ +/* 
-------------------------------------------------------------------------- */ + class VirtualMachineXML : public ObjectXML { public: @@ -106,6 +180,20 @@ public: return ds_rank; }; + const string& get_nic_rank(int nic_id) + { + static std::string es; + + std::map::iterator it = nics.find(nic_id); + + if ( it != nics.end() ) + { + return it->second->get_rank(); + } + + return es; + }; + const string& get_requirements() { return requirements; @@ -116,6 +204,20 @@ public: return ds_requirements; } + const string& get_nic_requirements(int nic_id) + { + static std::string es; + + std::map::iterator it = nics.find(nic_id); + + if ( it != nics.end() ) + { + return it->second->get_requirements(); + } + + return es; + } + /** * Return VM usage requirments */ @@ -204,6 +306,20 @@ public: match_datastores.add_resource(oid); } + /** + * Adds a matching network + * @param oid of the network + */ + void add_match_network(int oid, int nic_id) + { + std::map::iterator it = nics.find(nic_id); + + if ( it != nics.end() ) + { + it->second->add_match_network(oid); + } + } + /** * Returns a vector of matched hosts */ @@ -220,6 +336,40 @@ public: return match_datastores.get_resources(); } + /** + * Returns a vector of matched networks + */ + const vector& get_match_networks(int nic_id) + { + static std::vector ev; + + std::map::iterator it = nics.find(nic_id); + + if ( it != nics.end() ) + { + return it->second->get_match_networks(); + } + + return ev; + } + + /** + * Returns a VirtualMachineNicXML + */ + VirtualMachineNicXML * get_nic(int nic_id) + { + VirtualMachineNicXML * n = 0; + + std::map::iterator it = nics.find(nic_id); + + if ( it != nics.end() ) + { + n = it->second; + } + + return n; + } + /** * Sort the matched hosts for the VM */ @@ -236,6 +386,19 @@ public: match_datastores.sort_resources(); } + /** + * Sort the matched networks for the VM + */ + void sort_match_networks(int nic_id) + { + std::map::iterator it = nics.find(nic_id); + + if ( it != nics.end() ) + { + 
it->second->sort_match_networks(); + } + } + /** * Removes the matched hosts */ @@ -252,6 +415,19 @@ public: match_datastores.clear(); } + /** + * Removes the matched networks + */ + void clear_match_networks() + { + map::iterator it; + + for (it = nics.begin(); it != nics.end(); it++ ) + { + it->second->clear_match_networks(); + } + } + /** * Marks the VM to be only deployed on public cloud hosts */ @@ -368,6 +544,14 @@ public: */ bool clear_log(); + /** + * Return ids of NICs with NETWORK_MODE=auto (i.e. need to schedule networks) + */ + set get_nics_ids() + { + return nics_ids_auto; + } + protected: /** @@ -383,6 +567,7 @@ protected: ResourceMatch match_datastores; + bool only_public_cloud; set affined_vms; @@ -415,9 +600,12 @@ protected: string ds_requirements; string ds_rank; + set nics_ids_auto; + + map nics; + VirtualMachineTemplate * vm_template; /**< The VM template */ VirtualMachineTemplate * user_template; /**< The VM user template */ - }; #endif /* VM_XML_H_ */ diff --git a/src/scheduler/include/VirtualNetworkPoolXML.h b/src/scheduler/include/VirtualNetworkPoolXML.h new file mode 100644 index 0000000000..ff0dfe838d --- /dev/null +++ b/src/scheduler/include/VirtualNetworkPoolXML.h @@ -0,0 +1,59 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2018, OpenNebula Project, OpenNebula Systems */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------- */ + + +#ifndef VNET_POOL_XML_H_ +#define VNET_POOL_XML_H_ + +#include "PoolXML.h" +#include "VirtualNetworkXML.h" + +using namespace std; + +class VirtualNetworkPoolXML : public PoolXML +{ +public: + + VirtualNetworkPoolXML(Client* client):PoolXML(client) {}; + + ~VirtualNetworkPoolXML(){}; + + int set_up(); + + /** + * Gets an object from the pool + * @param oid the object unique identifier + * + * @return a pointer to the object, 0 in case of failure + */ + VirtualNetworkXML * get(int oid) const + { + return static_cast(PoolXML::get(oid)); + }; + +protected: + + int get_suitable_nodes(vector& content) + { + return get_nodes("/VNET_POOL/VNET", content); + }; + + void add_object(xmlNodePtr node); + + int load_info(xmlrpc_c::value &result); +}; + +#endif /* VNET_POOL_XML_H_ */ \ No newline at end of file diff --git a/src/scheduler/include/VirtualNetworkXML.h b/src/scheduler/include/VirtualNetworkXML.h new file mode 100644 index 0000000000..9fd7f68fb3 --- /dev/null +++ b/src/scheduler/include/VirtualNetworkXML.h @@ -0,0 +1,122 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2018, OpenNebula Project, OpenNebula Systems */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------- */ + + +#ifndef VNET_XML_H_ +#define VNET_XML_H_ + +#include "ObjectXML.h" +#include "PoolObjectAuth.h" + +using namespace std; + +class VirtualNetworkXML : public ObjectXML +{ +public: + VirtualNetworkXML(const string &xml_doc):ObjectXML(xml_doc) + { + init_attributes(); + }; + + VirtualNetworkXML(const xmlNodePtr node):ObjectXML(node) + { + init_attributes(); + }; + + /** + * Tests whether a new NIC can be attached to a vnet + * @param error error message + * @return true if the VNET can host the VM + */ + bool test_leases(string & error) const; + + /** + * Tests whether a new NIC can be attached to a vnet + * @param num_leases num leases needs by VM + * @return true if the VNET can host the VM + */ + bool test_leases() const + { + string tmp_st; + return test_leases(tmp_st); + } + + /** + * Adds a new lease to the VNET + */ + void add_lease() + { + free_leases--; + }; + + void rollback_leases(int num_leases) + { + free_leases += num_leases; + } + + int get_oid() const + { + return oid; + }; + + bool is_in_cluster(int cid) const + { + return cluster_ids.count(cid) != 0; + }; + + /** + * Fills a auth class to perform an authZ/authN request based on the object + * attributes + * @param auths to be filled + */ + void get_permissions(PoolObjectAuth& auth); + + /** + * Prints the Virtual Network information to an output stream. This function is used + * for logging purposes. 
+ */ + friend ostream& operator<<(ostream& o, const VirtualNetworkXML& p); + +private: + + int oid; + set cluster_ids; + + int uid; + int gid; + + int owner_u; + int owner_m; + int owner_a; + + int group_u; + int group_m; + int group_a; + + int other_u; + int other_m; + int other_a; + + int free_leases; + + static const char *net_paths[]; /**< paths for search function */ + + static int net_num_paths; /**< number of paths*/ + + void init_attributes(); +}; + +#endif /* VNET_XML_H_ */ diff --git a/src/scheduler/src/pool/SConstruct b/src/scheduler/src/pool/SConstruct index 2b8a731abe..46912b7582 100644 --- a/src/scheduler/src/pool/SConstruct +++ b/src/scheduler/src/pool/SConstruct @@ -31,7 +31,9 @@ source_files=[ 'VMGroupPoolXML.cc', 'VMGroupXML.cc', 'DatastorePoolXML.cc', - 'DatastoreXML.cc'] + 'DatastoreXML.cc', + 'VirtualNetworkPoolXML.cc', + 'VirtualNetworkXML.cc'] # Build library sched_env.StaticLibrary(lib_name, source_files) diff --git a/src/scheduler/src/pool/VirtualMachinePoolXML.cc b/src/scheduler/src/pool/VirtualMachinePoolXML.cc index cd28b61b7b..59eb862fc4 100644 --- a/src/scheduler/src/pool/VirtualMachinePoolXML.cc +++ b/src/scheduler/src/pool/VirtualMachinePoolXML.cc @@ -148,7 +148,7 @@ int VirtualMachinePoolXML::load_info(xmlrpc_c::value &result) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int VirtualMachinePoolXML::dispatch(int vid, int hid, int dsid, bool resched) const +int VirtualMachinePoolXML::dispatch(int vid, int hid, int dsid, bool resched,const string& extra_template) const { xmlrpc_c::value deploy_result; @@ -174,12 +174,13 @@ int VirtualMachinePoolXML::dispatch(int vid, int hid, int dsid, bool resched) co else { client->call("one.vm.deploy", // methodName - "iibi", // arguments format + "iibis", // arguments format &deploy_result, // resultP vid, // argument 1 (VM) hid, // argument 2 (HOST) false, // argument 3 (ENFORCE) - 
dsid); // argument 5 (SYSTEM SD) + dsid, // argument 5 (SYSTEM SD) + extra_template.c_str()); // argument 6 (EXTRA TEMPLATE) } } catch (exception const& e) diff --git a/src/scheduler/src/pool/VirtualMachineXML.cc b/src/scheduler/src/pool/VirtualMachineXML.cc index 207bc1c4d5..0cffa1b497 100644 --- a/src/scheduler/src/pool/VirtualMachineXML.cc +++ b/src/scheduler/src/pool/VirtualMachineXML.cc @@ -99,6 +99,55 @@ void VirtualMachineXML::init_attributes() ds_requirements = automatic_ds_requirements; } + // ------------------- NIC REQUIREMENTS ------------------------------------- + + if (get_nodes("/VM/TEMPLATE/NIC", nodes) > 0) + { + std::string net_mode; + + vector::iterator it_nodes; + + for (it_nodes = nodes.begin(); it_nodes != nodes.end(); ++it_nodes) + { + VirtualMachineTemplate * nic_template = new VirtualMachineTemplate; + + nic_template->from_xml_node(*it_nodes); + + bool rc = nic_template->get("NETWORK_MODE", net_mode); + one_util::toupper(net_mode); + + if ( rc && net_mode == "AUTO" ) + { + std::string requirements, rank; + int nic_id; + + nic_template->get("NIC_ID", nic_id); + + nics_ids_auto.insert(nic_id); + + VirtualMachineNicXML * the_nic = new VirtualMachineNicXML(); + + nics.insert(make_pair(nic_id, the_nic)); + + if ( nic_template->get("SCHED_REQUIREMENTS", requirements) ) + { + the_nic->set_requirements(requirements); + } + + if ( nic_template->get("SCHED_RANK", rank) ) + { + the_nic->set_rank(rank); + } + } + + delete nic_template; + } + + free_nodes(nodes); + } + + nodes.clear(); + // ---------------- HISTORY HID, DSID, RESCHED & TEMPLATE ------------------ xpath(hid, "/VM/HISTORY_RECORDS/HISTORY/HID", -1); @@ -200,6 +249,25 @@ ostream& operator<<(ostream& os, VirtualMachineXML& vm) os << endl; + set nics_ids = vm.get_nics_ids(); + + for (set::iterator it = nics_ids.begin(); it != nics_ids.end(); it++) + { + os << "\tNIC_ID: "<< *it << endl + << "\t-----------------------------------" << endl; + os << "\tPRI\tID - NETWORKS"<< endl + << 
"\t------------------------" << endl; + + const vector net_resources = vm.nics[*it]->get_match_networks(); + + for (i = net_resources.rbegin(); i != net_resources.rend() ; i++) + { + os << "\t" << (*i)->priority << "\t" << (*i)->oid << endl; + } + + os << endl; + } + return os; }; diff --git a/src/scheduler/src/pool/VirtualNetworkPoolXML.cc b/src/scheduler/src/pool/VirtualNetworkPoolXML.cc new file mode 100644 index 0000000000..7f7121cf71 --- /dev/null +++ b/src/scheduler/src/pool/VirtualNetworkPoolXML.cc @@ -0,0 +1,102 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2018, OpenNebula Project, OpenNebula Systems */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------- */ + +#include "VirtualNetworkPoolXML.h" +#include + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int VirtualNetworkPoolXML::set_up() +{ + ostringstream oss; + int rc; + + rc = PoolXML::set_up(); + + if ( rc == 0 ) + { + if (NebulaLog::log_level() >= Log::DDDEBUG) + { + oss << "Discovered VNETS:" << endl; + + oss << right << setw(8) << "ID" << " " + << right << setw(8) << "Leases" << " " << endl + << setw(20) << setfill('-') << "-" << setfill(' ') << endl; + + map::iterator it; + + for (it=objects.begin();it!=objects.end();it++) + { + VirtualNetworkXML * n = dynamic_cast(it->second); + + oss << *n << endl; + } + } + else + { + oss << "Discovered " << objects.size() << " vnets."; + } + + NebulaLog::log("VNET",Log::DEBUG,oss); + } + + return rc; +} + + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +void VirtualNetworkPoolXML::add_object(xmlNodePtr node) +{ + if ( node == 0 || node->children == 0 ) + { + NebulaLog::log("VNET",Log::ERROR, + "XML Node does not represent a valid VNET"); + return; + } + + VirtualNetworkXML* vnet = new VirtualNetworkXML(node); + + objects.insert(pair(vnet->get_oid(), vnet)); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int VirtualNetworkPoolXML::load_info(xmlrpc_c::value &result) +{ + try + { + client->call("one.vnpool.info", "iii", &result, -2, -1, -1); + + return 0; + } + catch (exception const& e) + { + ostringstream oss; + oss << "Exception raised: " << e.what(); + + NebulaLog::log("VNET", Log::ERROR, oss); + + return -1; + } +} + +/* 
-------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + diff --git a/src/scheduler/src/pool/VirtualNetworkXML.cc b/src/scheduler/src/pool/VirtualNetworkXML.cc new file mode 100644 index 0000000000..1b79549ae4 --- /dev/null +++ b/src/scheduler/src/pool/VirtualNetworkXML.cc @@ -0,0 +1,146 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2018, OpenNebula Project, OpenNebula Systems */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------- */ + +#include +#include + +#include "VirtualNetworkXML.h" +#include "NebulaUtil.h" +#include "NebulaLog.h" +#include "ObjectCollection.h" + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int VirtualNetworkXML::net_num_paths = 2; + +const char * VirtualNetworkXML::net_paths[] = { + "/VNET/TEMPLATE/", + "/VNET/" +}; + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +void VirtualNetworkXML::init_attributes() +{ + xpath(oid, "/VNET/ID", -1); + + ObjectCollection cluster_collection("CLUSTERS"); + cluster_collection.from_xml(this, "/VNET/"); + + cluster_ids = cluster_collection.clone(); + + xpath(uid, "/VNET/UID", -1); + xpath(gid, "/VNET/GID", -1); + + xpath(owner_u, "/VNET/PERMISSIONS/OWNER_U", 0); + xpath(owner_m, "/VNET/PERMISSIONS/OWNER_M", 0); + xpath(owner_a, "/VNET/PERMISSIONS/OWNER_A", 0); + + xpath(group_u, "/VNET/PERMISSIONS/GROUP_U", 0); + xpath(group_m, "/VNET/PERMISSIONS/GROUP_M", 0); + xpath(group_a, "/VNET/PERMISSIONS/GROUP_A", 0); + + xpath(other_u, "/VNET/PERMISSIONS/OTHER_U", 0); + xpath(other_m, "/VNET/PERMISSIONS/OTHER_M", 0); + xpath(other_a, "/VNET/PERMISSIONS/OTHER_A", 0); + + //-------------------- AR_POOL used leases ------------------------------ + vector ar_size; + vector ar_used_leases; + + xpaths(ar_size,"/VNET/AR_POOL/AR/SIZE"); + xpaths(ar_used_leases,"/VNET/AR_POOL/AR/USED_LEASES"); + + int used_leases; + + free_leases = 0; + + for (size_t i = 0; i < ar_size.size() ; i++) + { + free_leases += atoi(ar_size[i].c_str()); + } + + xpath(used_leases, "/VNET/USED_LEASES", 0); + + free_leases -= used_leases; + + ObjectXML::paths = net_paths; + ObjectXML::num_paths = net_num_paths; +} + +/* 
-------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +bool VirtualNetworkXML::test_leases(string & error) const +{ + bool fits = (free_leases > 0); + + if (!fits) + { + if (NebulaLog::log_level() >= Log::DDEBUG) + { + ostringstream oss; + + oss << "Not enough free leases. " + << "Requested: 1 LEASES, " + << "Available: " << free_leases << " LEASES"; + + error = oss.str(); + } + else + { + error = "Not enough free leases."; + } + } + + return fits; +} + + +void VirtualNetworkXML::get_permissions(PoolObjectAuth& auth) +{ + auth.obj_type = PoolObjectSQL::NET; + + auth.oid = oid; + auth.uid = uid; + auth.gid = gid; + auth.cids = cluster_ids; + + auth.owner_u = owner_u; + auth.owner_m = owner_m; + auth.owner_a = owner_a; + + auth.group_u = group_u; + auth.group_m = group_m; + auth.group_a = group_a; + + auth.other_u = other_u; + auth.other_m = other_m; + auth.other_a = other_a; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +ostream& operator<<(ostream& o, const VirtualNetworkXML& p) +{ + o << right << setw(8) << p.oid << " " + << right << setw(8) << p.free_leases << " "; + + return o; +} diff --git a/src/scheduler/src/sched/Scheduler.cc b/src/scheduler/src/sched/Scheduler.cc index ea91508674..d67ad94e2c 100644 --- a/src/scheduler/src/sched/Scheduler.cc +++ b/src/scheduler/src/sched/Scheduler.cc @@ -325,6 +325,8 @@ void Scheduler::start() vm_roles_pool = new VirtualMachineRolePoolXML(client, machines_limit); vmpool = new VirtualMachinePoolXML(client, machines_limit, live_rescheds==1); + vnetpool = new VirtualNetworkPoolXML(client); + vmgpool = new VMGroupPoolXML(client); vmapool = new VirtualMachineActionsPoolXML(client, machines_limit); @@ -482,6 +484,13 @@ int Scheduler::set_up_pools() return rc; } + rc = vnetpool->set_up(); + + if ( rc != 
0 ) + { + return rc; + } + return 0; }; @@ -716,6 +725,107 @@ static bool match_system_ds(AclXML * acls, UserPoolXML * upool, return true; } + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +/** + * Match network for this VM that: + * 1. Meet user/policy requirements + * 2. Have enough leases to host the VM + * + * @param acl pool + * @param users the user pool + * @param vm the virtual machine + * @param vdisk vm requirement + * @param net to evaluate vm assgiment + * @param n_auth number of nets authorized for the user, incremented if needed + * @param n_error number of requirement errors, incremented if needed + * @param n_matched number of networks that fullfil VM sched_requirements + * @param n_fits number of networks with leases that fits the VM requirements + * @param error, string describing why the host is not valid + * @return true for a positive match + */ +static bool match_network(AclXML * acls, UserPoolXML * upool, + VirtualMachineXML* vm, int nic_id, VirtualNetworkXML * net, int& n_auth, + int& n_error, int& n_fits, int &n_matched, string &error) +{ + // ------------------------------------------------------------------------- + // Check if user is authorized + // ------------------------------------------------------------------------- + if ( vm->get_uid() != 0 && vm->get_gid() != 0 ) + { + PoolObjectAuth netperms; + + net->get_permissions(netperms); + + UserXML * user = upool->get(vm->get_uid()); + + if (user == 0) + { + error = "User does not exists."; + return false; + } + + const vector vgids = user->get_gids(); + + set gids(vgids.begin(), vgids.end()); + + if ( !acls->authorize(vm->get_uid(), gids, netperms, AuthRequest::USE)) + { + error = "Permission denied."; + return false; + } + } + + n_auth++; + + if ( !net->test_leases(error) ) + { + return false; + } + + n_fits++; + + // 
------------------------------------------------------------------------- + // Evaluate VM requirements for NICS + // ------------------------------------------------------------------------- + if (!vm->get_nic_requirements(nic_id).empty()) + { + char * estr; + + bool matched = true; + + if (net->eval_bool(vm->get_nic_requirements(nic_id), matched, &estr) != 0) + { + ostringstream oss; + + n_error++; + + oss << "Error in REQUIREMENTS - NIC_ID(" << nic_id <<"): '" + << vm->get_nic_requirements(nic_id) << "', error: " << estr; + + vm->log(oss.str()); + + error = oss.str(); + + free(estr); + + return false; + } + + if (matched == false) + { + error = "It does not fulfill NIC REQUIREMENTS."; + return false; + } + } + + n_matched++; + + return true; +} + /* -------------------------------------------------------------------------- */ static void log_match(int vid, const string& msg) @@ -747,6 +857,7 @@ void Scheduler::match_schedule() HostXML * host; DatastoreXML *ds; + VirtualNetworkXML *net; string m_error; @@ -759,12 +870,15 @@ void Scheduler::match_schedule() const map hosts = hpool->get_objects(); const map datastores = dspool->get_objects(); const map users = upool->get_objects(); + const map nets = vnetpool->get_objects(); double total_cl_match_time = 0; double total_host_match_time = 0; double total_host_rank_time = 0; double total_ds_match_time = 0; double total_ds_rank_time = 0; + double total_net_match_time = 0; + double total_net_rank_time = 0; time_t stime = time(0); @@ -1014,6 +1128,117 @@ void Scheduler::match_schedule() vm->sort_match_datastores(); total_ds_rank_time += profile(false); + + // --------------------------------------------------------------------- + // Match Networks for this VM + // --------------------------------------------------------------------- + + profile(true); + + set::iterator it_nic; + set nics_ids = vm->get_nics_ids(); + + bool not_matched = false; + + for (it_nic = nics_ids.begin(); it_nic != nics_ids.end(); ++it_nic) + { + 
n_resources = 0; + + n_auth = 0; + n_matched = 0; + n_error = 0; + n_fits = 0; + + int nic_id = *it_nic; + + for (obj_it = nets.begin(); obj_it != nets.end(); ++obj_it) + { + net = static_cast(obj_it->second); + + if (match_network(acls, upool, vm, nic_id, net, n_auth, n_error, + n_fits, n_matched, m_error)) + { + vm->add_match_network(net->get_oid(), nic_id); + + n_resources++; + } + else + { + if (n_error > 0) + { + log_match(vm->get_oid(), "Cannot schedule VM. " + m_error); + break; + } + else if (NebulaLog::log_level() >= Log::DDEBUG) + { + ostringstream oss; + oss << "Network " << net->get_oid() << " discarded for VM " + << vm->get_oid() << " and NIC " << nic_id << ". " << m_error; + + NebulaLog::log("SCHED", Log::DDEBUG, oss); + } + } + } + + if (n_resources == 0) + { + if (n_error == 0)//No syntax error + { + if (nets.size() == 0) + { + vm->log("No networks found to run VMs"); + } + else if (n_auth == 0) + { + vm->log("User is not authorized to use any network"); + } + else if (n_fits == 0) + { + vm->log("No network with enough capacity for the VM"); + } + else if (n_matched == 0) + { + ostringstream oss; + + oss << "No network meet leases " + << "and SCHED_NIC_REQUIREMENTS: " + << vm->get_nic_requirements(nic_id); + + vm->log(oss.str()); + } + } + + vm->clear_match_hosts(); + vm->clear_match_datastores(); + + vmpool->update(vm); + + log_match(vm->get_oid(), "Cannot schedule VM, there is no " + "suitable network."); + + break; + + not_matched = true; + } + + profile(true); + + for (it = nic_policies.begin() ; it != nic_policies.end() ; it++) + { + (*it)->schedule(vm->get_nic(nic_id)); + } + + vm->sort_match_networks(nic_id); + + total_net_rank_time += profile(false); + } + + if ( not_matched ) + { + continue; + } + + total_net_match_time += profile(false); } if (NebulaLog::log_level() >= Log::DDEBUG) @@ -1021,20 +1246,24 @@ void Scheduler::match_schedule() ostringstream oss; oss << "Match Making statistics:\n" - << "\tNumber of VMs: " + << "\tNumber of VMs: 
" << pending_vms.size() << endl - << "\tTotal time: " - << one_util::float_to_str(time(0) - stime) << "s" << endl - << "\tTotal Cluster Match time: " - << one_util::float_to_str(total_cl_match_time) << "s" << endl - << "\tTotal Host Match time: " + << "\tTotal time: " + << one_util::float_to_str(time(0) - stime) << "s" << endl + << "\tTotal Cluster Match time: " + << one_util::float_to_str(total_cl_match_time) << "s" << endl + << "\tTotal Host Match time: " << one_util::float_to_str(total_host_match_time) << "s" << endl - << "\tTotal Host Ranking time: " - << one_util::float_to_str(total_host_rank_time) << "s" << endl - << "\tTotal DS Match time: " - << one_util::float_to_str(total_ds_match_time) << "s" << endl - << "\tTotal DS Ranking time: " - << one_util::float_to_str(total_ds_rank_time) << "s" << endl; + << "\tTotal Host Ranking time: " + << one_util::float_to_str(total_host_rank_time) << "s" << endl + << "\tTotal DS Match time: " + << one_util::float_to_str(total_ds_match_time) << "s" << endl + << "\tTotal DS Ranking time: " + << one_util::float_to_str(total_ds_rank_time) << "s" << endl + << "\tTotal Network Match time: " + << one_util::float_to_str(total_net_match_time) << "s" << endl + << "\tTotal Network Ranking time:" + << one_util::float_to_str(total_net_rank_time) << "s" << endl; NebulaLog::log("SCHED", Log::DDEBUG, oss); } @@ -1064,6 +1293,7 @@ void Scheduler::dispatch() { HostXML * host; DatastoreXML * ds; + VirtualNetworkXML * net; VirtualMachineXML * vm; ostringstream dss; @@ -1073,16 +1303,18 @@ void Scheduler::dispatch() long long dsk; vector pci; - int hid, dsid, cid; + int hid, dsid, cid, netid; unsigned int dispatched_vms = 0; bool dispatched, matched; char * estr; - vector::const_reverse_iterator i, j, k; + vector::const_reverse_iterator i, j, k, n; vector::iterator sp_it; + ostringstream extra; + //-------------------------------------------------------------------------- // Schedule pending VMs according to the VM policies (e.g. 
User priority) //-------------------------------------------------------------------------- @@ -1282,11 +1514,111 @@ void Scheduler::dispatch() continue; } + //------------------------------------------------------------------ + // Get the highest ranked network + //------------------------------------------------------------------ + extra.clear(); + + set nics_ids = vm->get_nics_ids(); + + map matched_networks; + + unsigned int num_mached_networks = 0; + + set::iterator it; + + for(it = nics_ids.begin(); it != nics_ids.end(); ++it) + { + int nic_id = *it; + + const vector net_resources = vm->get_match_networks(nic_id); + + netid = -1; + + for (n = net_resources.rbegin() ; n != net_resources.rend(); n++) + { + net = vnetpool->get((*n)->oid); + + if ( net == 0 ) + { + continue; + } + + //-------------------------------------------------------------- + // Test cluster membership for datastore and selected host + //-------------------------------------------------------------- + if (! net->is_in_cluster(cid)) + { + continue; + } + + //-------------------------------------------------------------- + // Test network leases + //-------------------------------------------------------------- + if ( !net->test_leases() ) + { + continue; + } + + net->add_lease(); + + //-------------------------------------------------------------- + //Select this DS to dispatch VM + //-------------------------------------------------------------- + netid = (*n)->oid; + + break; + } + + if ( netid == -1 ) + { + break; + } + + if ( matched_networks.find(netid) != matched_networks.end() ) + { + matched_networks[netid] += 1; + } + else + { + matched_networks[netid] = 1; + } + + num_mached_networks++; + + extra << "NIC=[NIC_ID=\"" << nic_id + << "\", NETWORK_MODE=\"auto\" , NETWORK_ID=\"" << netid + << "\"]"; + } + + if ( num_mached_networks < nics_ids.size()) + { + map::iterator it; + + for (it = matched_networks.begin(); it != matched_networks.end(); it++) + { + net = vnetpool->get(it->first); 
+ + net->rollback_leases(it->second); + } + + continue; + } + //------------------------------------------------------------------ // Dispatch and update host and DS capacity, and dispatch counters //------------------------------------------------------------------ - if (vmpool->dispatch((*k)->oid, hid, dsid, vm->is_resched()) != 0) + if (vmpool->dispatch((*k)->oid, hid, dsid, vm->is_resched(), extra.str()) != 0) { + map::iterator it; + + for ( it = matched_networks.begin(); it != matched_networks.end(); it++) + { + net = vnetpool->get(it->first); + + net->rollback_leases(it->second); + } + continue; } @@ -1356,8 +1688,8 @@ void Scheduler::dispatch() if (!dispatched) { vm->log("Cannot dispatch VM to any Host. Possible reasons: Not " - "enough capacity in Host or System DS, or dispatch limit " - "reached"); + "enough capacity in Host or System DS, dispatch limit " + "reached, or limit of free leases reached."); vmpool->update(vm); } } diff --git a/src/scheduler/src/sched/SchedulerTemplate.cc b/src/scheduler/src/sched/SchedulerTemplate.cc index 1c8440cc7c..b2c5f350a6 100644 --- a/src/scheduler/src/sched/SchedulerTemplate.cc +++ b/src/scheduler/src/sched/SchedulerTemplate.cc @@ -109,6 +109,13 @@ void SchedulerTemplate::set_conf_default() vattribute = new VectorAttribute("DEFAULT_DS_SCHED",vvalue); conf_default.insert(make_pair(vattribute->name(),vattribute)); + //DEFAULT_NIC_SCHED + vvalue.clear(); + vvalue.insert(make_pair("POLICY","1")); + + vattribute = new VectorAttribute("DEFAULT_NIC_SCHED",vvalue); + conf_default.insert(make_pair(vattribute->name(),vattribute)); + //"MEMORY_SYSTEM_DS_SCALE" value = "0"; @@ -217,3 +224,48 @@ string SchedulerTemplate::get_ds_policy() const return rank; } + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +string SchedulerTemplate::get_nics_policy() const +{ + int policy; + string rank; + + istringstream iss; + + 
const VectorAttribute * sched = get("DEFAULT_NIC_SCHED"); + + if (sched == 0) + { + return ""; + } + + iss.str(sched->vector_value("POLICY")); + iss >> policy; + + switch (policy) + { + case 0: //Packing + rank = "- USED_LEASES"; + break; + + case 1: //Striping + rank = "USED_LEASES"; + break; + + case 2: //Custom + rank = sched->vector_value("RANK"); + break; + + case 3: //Fixed + rank = "PRIORITY"; + break; + + default: + rank = ""; + } + + return rank; +} diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab.js b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab.js index ff36fc546f..208e693aa4 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab.js +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab.js @@ -91,6 +91,7 @@ define(function(require) { * @param {Object} context jquery selector * @param {Object} options * options.hide_pci {bool} true to disable the pci checkbox + * options.hide_auto {bool} true to disable the selection mode auto checkbox */ function _setup(context, options) { var that = this; @@ -99,6 +100,10 @@ define(function(require) { $("input.pci-type-nic", context).attr('disabled', 'disabled'); } + if (options != undefined && options.hide_auto == true){ + $(".only_create", context).hide(); + } + that.vnetsTable.initialize({ 'selectOptions': { 'select_callback': function(aData, options) { @@ -156,6 +161,20 @@ define(function(require) { if (!Config.isAdvancedEnabled("show_attach_nic_advanced")){ $("#nic_values", context).hide(); } + + $("input#"+this.nicTabId+"_network_mode", context).on("change", function(){ + var network_mode_on = $(this).prop("checked"); + + if(network_mode_on){ + $(".no_auto", context).hide(); + $(".auto", context).show(); + } else { + $(".auto", context).hide(); + $(".no_auto", context).show(); + } + }); + + $(".auto", context).hide(); } function 
_retrieve(context) { @@ -184,6 +203,20 @@ define(function(require) { nicJSON["NIC_PCI"] = true; } + if( $("input#"+this.nicTabId+"_network_mode", context).prop("checked") ){ + nicJSON["NETWORK_MODE"] = "auto"; + var req = $("input#"+this.nicTabId+"_SCHED_REQUIREMENTS", context).val(); + var rank = $("input#"+this.nicTabId+"_SCHED_RANK", context).val(); + + if ( req !== "" ){ + nicJSON["SCHED_REQUIREMENTS"] = req; + } + + if ( rank !== "" ){ + nicJSON["SCHED_RANK"] = rank; + } + } + return nicJSON; } @@ -247,6 +280,20 @@ define(function(require) { $("input.pci-type-nic", context).click(); } + if ( templateJSON["NETWORK_MODE"] && templateJSON["NETWORK_MODE"] === "auto" ) { + $("input#"+this.nicTabId+"_network_mode", context).prop("checked", true); + $(".no_auto", context).hide(); + $(".auto", context).show(); + + if ( templateJSON["SCHED_REQUIREMENTS"] ) { + $("input#"+this.nicTabId+"_SCHED_REQUIREMENTS", context).val(templateJSON["SCHED_REQUIREMENTS"]); + } + + if ( templateJSON["SCHED_RANK"] ) { + $("input#"+this.nicTabId+"_SCHED_RANK", context).val(templateJSON["SCHED_RANK"]); + } + } + WizardFields.fill(context, templateJSON); } }); diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab/html.hbs b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab/html.hbs index 5e8a79c9cc..aa20467cc5 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab/html.hbs +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab/html.hbs @@ -13,12 +13,49 @@ {{! See the License for the specific language governing permissions and }} {{! limitations under the License. }} {{! -------------------------------------------------------------------------- }} - - {{{vnetsTableSelectHTML}}} + {{!-- {{#isFeatureEnabled "automatic_selection_vnet"}} --}} +
+
+
+
+ + +
+ +
+
+
+
+
+ + +
+
+
+
+ + +
+
+
+
+ {{!-- {{/isFeatureEnabled}} --}} +
+ {{{vnetsTableSelectHTML}}} +

{{#advancedSection (tr "Advanced options") }} -
+
{{tr "Choose Network"}}
@@ -57,7 +94,7 @@
-
+
{{tr "Override Network Values IPv4"}}
@@ -110,7 +147,7 @@
-
+
{{tr "Override Network Values IPv6"}}
diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs b/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs index fbbbbe5d72..ec6270029c 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs @@ -44,7 +44,7 @@ -
+