diff --git a/share/hooks/vcenter/create_vcenter_net.rb b/share/hooks/vcenter/create_vcenter_net.rb
index e4f9e4d916..f46d55bb3f 100755
--- a/share/hooks/vcenter/create_vcenter_net.rb
+++ b/share/hooks/vcenter/create_vcenter_net.rb
@@ -186,8 +186,8 @@ def create_pg(one_vnet, esx_host)
if !vs
sw_name = esx_host.create_vss(sw_name,
- pnics,
nports,
+ pnics,
mtu,
pnics_available)
end
diff --git a/share/oneprovision/ansible/roles/opennebula-ssh/README.md b/share/oneprovision/ansible/roles/opennebula-ssh/README.md
index 1a94b03229..8a56b9119f 100644
--- a/share/oneprovision/ansible/roles/opennebula-ssh/README.md
+++ b/share/oneprovision/ansible/roles/opennebula-ssh/README.md
@@ -17,8 +17,3 @@ Requires the oneadmin user to be present.
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
-## Todo list
-
-None
-
-[1]: https://github.com/OpenNebula/infra/wiki/Creating-course-environments
diff --git a/share/oneprovision/ansible/roles/opennebula-ssh/tasks/deploy_local.yml b/share/oneprovision/ansible/roles/opennebula-ssh/tasks/deploy_local.yml
index e2dbb27c7d..563fe50ef3 100644
--- a/share/oneprovision/ansible/roles/opennebula-ssh/tasks/deploy_local.yml
+++ b/share/oneprovision/ansible/roles/opennebula-ssh/tasks/deploy_local.yml
@@ -1,7 +1,7 @@
---
- name: Update local known_hosts
- connection: local
+ delegate_to: localhost
become: no
known_hosts:
name: "{{ inventory_hostname }}"
diff --git a/src/cli/one_helper/oneimage_helper.rb b/src/cli/one_helper/oneimage_helper.rb
index 9d3b3f0408..ad096a1d14 100644
--- a/src/cli/one_helper/oneimage_helper.rb
+++ b/src/cli/one_helper/oneimage_helper.rb
@@ -98,9 +98,9 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
end
},
{
- :name => 'driver',
- :large => '--driver driver',
- :description => 'Driver to use image (raw, qcow2, tap:aio:...)',
+ :name => 'format',
+ :large => '--format format',
+ :description => 'Format of the image (raw, qcow2, ...)',
:format => String
},
{
diff --git a/src/cli/one_helper/onevm_helper.rb b/src/cli/one_helper/onevm_helper.rb
index b02a32af59..e8b8a9fd95 100644
--- a/src/cli/one_helper/onevm_helper.rb
+++ b/src/cli/one_helper/onevm_helper.rb
@@ -252,7 +252,7 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
end
vm_nics.each do |nic|
- %w[IP IP6_GLOBAL IP6_ULA IP6
+ %w[IP EXTERNAL IP6_GLOBAL IP6_ULA IP6
VROUTER_IP VROUTER_IP6_GLOBAL VROUTER_IP6_ULA].each do |attr|
if nic.key?(attr)
ips.push(nic[attr])
@@ -985,7 +985,7 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
vm_nics.each do |nic|
next if nic.key?('CLI_DONE')
- %w[IP6_LINK IP6_ULA IP6_GLOBAL IP6].each do |attr|
+ %w[EXTERNAL IP6_LINK IP6_ULA IP6_GLOBAL IP6].each do |attr|
next unless nic.key?(attr)
shown_ips << nic[attr]
@@ -1013,6 +1013,7 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
end
shown_ips << nic['IP'] if nic.key?('IP')
+ shown_ips << nic['EXTERNAL'] if nic.key?('EXTERNAL')
nic.merge!(nic_default) {|_k, v1, _v2| v1 }
array_id += 1
diff --git a/src/cli/oneacct b/src/cli/oneacct
index 0a98c5e161..a691a89ad8 100755
--- a/src/cli/oneacct
+++ b/src/cli/oneacct
@@ -109,12 +109,14 @@ CommandParser::CmdParser.new(ARGV) do
puts xml_str
end
else
+ # rubocop:disable Naming/VariableNumber
order_by = {}
order_by[:order_by_1] = 'VM/UID'
if options[:split] && !options[:csv]
order_by[:order_by_2] = 'VM/ID'
end
+ # rubocop:enable Naming/VariableNumber
acct_hash = pool.accounting(filter_flag,
common_opts.merge(order_by))
diff --git a/src/cli/oneshowback b/src/cli/oneshowback
index 777b486a2e..078e9affa8 100755
--- a/src/cli/oneshowback
+++ b/src/cli/oneshowback
@@ -110,11 +110,13 @@ CommandParser::CmdParser.new(ARGV) do
puts xml_str
end
else
+ # rubocop:disable Naming/VariableNumber
order_by = {}
if !options[:csv]
order_by[:order_by_1] = 'YEAR'
order_by[:order_by_2] = 'MONTH'
end
+ # rubocop:enable Naming/VariableNumber
data_hash = pool.showback(filter_flag,
common_opts.merge(order_by))
diff --git a/src/cli/oneuser b/src/cli/oneuser
index 58013069b4..2279751b3a 100755
--- a/src/cli/oneuser
+++ b/src/cli/oneuser
@@ -821,5 +821,4 @@ CommandParser::CmdParser.new(ARGV) do
user.disable
end
end
-
end
diff --git a/src/datastore/Datastore.cc b/src/datastore/Datastore.cc
index 8f54dabc74..7643ac0d05 100644
--- a/src/datastore/Datastore.cc
+++ b/src/datastore/Datastore.cc
@@ -202,11 +202,16 @@ void Datastore::disk_attribute(
* 3. DRIVER in DISK
* 4. Default set to "raw"
*/
- if (disk->vector_value("TYPE") != "CDROM" && disk->is_volatile())
+
+ string type = disk->vector_value("TYPE");
+
+ one_util::toupper(type);
+
+    if (type != "CDROM" && disk->is_volatile())
{
string driver = get_ds_driver();
- if (disk->vector_value("TYPE") == "FS") /* Volatile Datablock */
+ if (type == "FS") /* Volatile Datablock */
{
if (!driver.empty()) /* DRIVER in TM_MAD_CONF or DS Template */
{
diff --git a/src/datastore_mad/remotes/vcenter_downloader.rb b/src/datastore_mad/remotes/vcenter_downloader.rb
index c684386d92..c17a300270 100755
--- a/src/datastore_mad/remotes/vcenter_downloader.rb
+++ b/src/datastore_mad/remotes/vcenter_downloader.rb
@@ -62,7 +62,7 @@ rescue StandardError => e
" from datastore #{ds_id} "\
"Reason: \"#{e.message}\"}"
if VCenterDriver::CONFIG[:debug_information]
- STDERR.puts "#{e.backtrace}"
+ STDERR.puts e.backtrace.to_s
end
exit(-1)
ensure
diff --git a/src/datastore_mad/remotes/vcenter_uploader.rb b/src/datastore_mad/remotes/vcenter_uploader.rb
index ed081fc021..03df327d16 100755
--- a/src/datastore_mad/remotes/vcenter_uploader.rb
+++ b/src/datastore_mad/remotes/vcenter_uploader.rb
@@ -64,7 +64,7 @@ rescue StandardError => e
STDERR.puts "Cannot upload image to datastore #{ds_id} "\
"Reason: \"#{e.message}\""
if VCenterDriver::CONFIG[:debug_information]
- STDERR.puts "#{e.backtrace}"
+ STDERR.puts e.backtrace.to_s
end
exit(-1)
ensure
diff --git a/src/ipamm_mad/remotes/aws/register_address_range b/src/ipamm_mad/remotes/aws/register_address_range
index 0fb4783e8c..15fff87982 100755
--- a/src/ipamm_mad/remotes/aws/register_address_range
+++ b/src/ipamm_mad/remotes/aws/register_address_range
@@ -91,16 +91,19 @@ require 'opennebula'
require 'oneprovision'
require 'ipaddr'
+# Add ^ and < operators to the IPAddr class
class IPAddr
+
attr_reader :addr
def ^(other)
- return self.clone.set(@addr ^ other.to_i)
+ clone.set(@addr ^ other.to_i)
end
def <(other)
- return @addr < other.addr
+ @addr < other.addr
end
+
end
begin
@@ -126,7 +129,7 @@ begin
cidr = IPAddr.new(cidr_s)
- if ! ['255.255.0.0', '16'].include? mask
+ if !['255.255.0.0', '16'].include? mask
STDERR.puts 'Elastic CIDR block has to be /16'
exit(-1)
end
diff --git a/src/ipamm_mad/remotes/packet/register_address_range b/src/ipamm_mad/remotes/packet/register_address_range
index f6cc5f1b8a..40570e780e 100755
--- a/src/ipamm_mad/remotes/packet/register_address_range
+++ b/src/ipamm_mad/remotes/packet/register_address_range
@@ -97,10 +97,13 @@ require 'ipaddr'
IP_TYPE = %w[public_ipv4 global_ipv4]
+# Add ^ operator to the IPAddr class
class IPAddr
+
def ^(other)
- return self.clone.set(@addr ^ other.to_i)
+ clone.set(@addr ^ other.to_i)
end
+
end
begin
diff --git a/src/mad/ruby/DriverExecHelper.rb b/src/mad/ruby/DriverExecHelper.rb
index 49ffecd02a..3d41cb2da0 100644
--- a/src/mad/ruby/DriverExecHelper.rb
+++ b/src/mad/ruby/DriverExecHelper.rb
@@ -110,6 +110,7 @@ module DriverExecHelper
# METHODS FOR LOGS & COMMAND OUTPUT
#
# Sends a message to the OpenNebula core through stdout
+ # rubocop:disable Metrics/ParameterLists
def send_message(action = '-', result = RESULT[:failure],
id = '-', info = '-')
@@ -118,6 +119,7 @@ module DriverExecHelper
STDOUT.flush
end
end
+ # rubocop:enable Metrics/ParameterLists
# Sends a log message to ONE. The +message+ can be multiline, it will
# be automatically splitted by lines.
diff --git a/src/market_mad/remotes/one/monitor b/src/market_mad/remotes/one/monitor
index ec5e642893..9e5309116c 100755
--- a/src/market_mad/remotes/one/monitor
+++ b/src/market_mad/remotes/one/monitor
@@ -118,7 +118,7 @@ class OneMarket
dname = disk.keys.first
tmpl << <<-EOT.strip
- DISK = [ NAME = "#{dname}", APP="#{disk[dname]}]
+ DISK = [ NAME = "#{dname}", APP="#{disk[dname]}" ]
EOT
end
end
@@ -140,11 +140,11 @@ class OneMarket
print_var(tmpl, 'MD5', app['md5'])
if app['roles']
- app['roles'].each do |disk|
- rname = disk.keys.first
+ app['roles'].each do |role|
+ rname = role.keys.first
tmpl << <<-EOT.strip
- ROLE = [ NAME = "#{rname}", APP="#{role[rname]}]
+ ROLE = [ NAME = "#{rname}", APP="#{role[rname]}" ]
EOT
end
end
diff --git a/src/oneprovision/lib/provision/ansible.rb b/src/oneprovision/lib/provision/ansible.rb
index fa5db2a651..e341a65bd8 100644
--- a/src/oneprovision/lib/provision/ansible.rb
+++ b/src/oneprovision/lib/provision/ansible.rb
@@ -93,7 +93,6 @@ module OneProvision
cmd = "ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg "
cmd << "ansible-playbook #{ANSIBLE_ARGS}"
cmd << " -i #{ansible_dir}/inventory"
- cmd << " -i #{ANSIBLE_LOCATION}/inventories/#{i}"
cmd << " -e @#{ansible_dir}/group_vars.yml"
cmd << " #{ANSIBLE_LOCATION}/#{i}.yml"
diff --git a/src/oneprovision/lib/terraform/providers/templates/packet/network.erb b/src/oneprovision/lib/terraform/providers/templates/packet/network.erb
index 3397aa6822..08d9314eaf 100644
--- a/src/oneprovision/lib/terraform/providers/templates/packet/network.erb
+++ b/src/oneprovision/lib/terraform/providers/templates/packet/network.erb
@@ -1,6 +1,10 @@
-resource "packet_reserved_ip_block" "device_<%= obj['ID'] %>" {
- project_id = "<%= provision['PACKET_PROJECT'] %>"
- facility = "<%= provision['FACILITY'] %>"
- quantity = "<%= provision['SIZE'] %>"
-}
+<%# resource "packet_reserved_ip_block" "device_<%= obj['ID'] %1>" { %>
+<%# project_id = "<%= provision['PROJECT'] %1>" %>
+<%# facility = "<%= provision['FACILITY'] %1>" %>
+<%# <% if obj['AR_POOL'] && obj['AR_POOL']['AR'] && obj['AR_POOL']['AR']['SIZE'] %1> %>
+<%# quantity = "<%= obj['AR_POOL']['AR']['SIZE'] %1>" %>
+<%# <% else %1> %>
+<%# quantity = "1" %>
+<%# <% end %1> %>
+<%# } %>
diff --git a/src/scheduler/src/pool/VirtualMachineXML.cc b/src/scheduler/src/pool/VirtualMachineXML.cc
index bd61c41a7f..d3fb9e5f01 100644
--- a/src/scheduler/src/pool/VirtualMachineXML.cc
+++ b/src/scheduler/src/pool/VirtualMachineXML.cc
@@ -605,7 +605,7 @@ void VirtualMachineXML::log(const string &st)
}
ostringstream oss;
- oss << one_util::log_time() << " : " << st;
+ oss << one_util::log_time() << ": " << st;
user_template->replace("SCHED_MESSAGE", oss.str());
}
diff --git a/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update.js b/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update.js
index 2318777448..0bc95a3489 100644
--- a/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update.js
+++ b/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update.js
@@ -160,6 +160,7 @@ define(function(require) {
$('select[name="deployment"]', context).val(element.TEMPLATE.BODY.deployment);
$("select[name='shutdown_action_service']", context).val(element.TEMPLATE.BODY.shutdown_action);
$("input[name='ready_status_gate']", context).prop("checked",element.TEMPLATE.BODY.ready_status_gate || false);
+ $("input[name='automatic_deletion']", context).prop("checked",element.TEMPLATE.BODY.automatic_deletion || false);
// Remove role tabs
$("#roles_tabs i.remove-tab", context).trigger("click");
diff --git a/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update/html.hbs b/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update/html.hbs
index eccd21cd65..03ff1da021 100644
--- a/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update/html.hbs
+++ b/src/sunstone/public/app/tabs/oneflow-services-tab/form-panels/update/html.hbs
@@ -101,6 +101,14 @@
+
+
+
+
+
+
{{/advancedSection}}
diff --git a/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create.js b/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create.js
index 3ae2bf33ef..4a75613f8a 100644
--- a/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create.js
+++ b/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create.js
@@ -261,6 +261,7 @@ define(function(require) {
var deployment = $('select[name="deployment"]', context).val();
var shutdown_action_service = $('select[name="shutdown_action_service"]', context).val();
var ready_status_gate = $('input[name="ready_status_gate"]', context).prop("checked");
+ var automatic_deletion = $('input[name="automatic_deletion"]', context).prop("checked");
var custom_attrs = {};
var network_attrs = {};
@@ -317,6 +318,8 @@ define(function(require) {
json_template['ready_status_gate'] = ready_status_gate;
+ json_template['automatic_deletion'] = automatic_deletion;
+
// add labels
var currentInfo = Sunstone.getElementRightInfo(TAB_ID)
if (
@@ -381,6 +384,7 @@ define(function(require) {
$('select[name="deployment"]', context).val(element.TEMPLATE.BODY.deployment);
$("select[name='shutdown_action_service']", context).val(element.TEMPLATE.BODY.shutdown_action);
$("input[name='ready_status_gate']", context).prop("checked",element.TEMPLATE.BODY.ready_status_gate || false);
+ $("input[name='automatic_deletion']", context).prop("checked",element.TEMPLATE.BODY.automatic_deletion || false);
$(".service_networks i.remove-tab", context).trigger("click");
diff --git a/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create/wizard.hbs b/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create/wizard.hbs
index f1d30c287a..b3849b8ab7 100644
--- a/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create/wizard.hbs
+++ b/src/sunstone/public/app/tabs/oneflow-templates-tab/form-panels/create/wizard.hbs
@@ -100,6 +100,14 @@
+
+
+
+
+
+
{{/advancedSection}}
diff --git a/src/vm/VirtualMachine.cc b/src/vm/VirtualMachine.cc
index 77a600464e..cb1612c343 100644
--- a/src/vm/VirtualMachine.cc
+++ b/src/vm/VirtualMachine.cc
@@ -2722,7 +2722,7 @@ void VirtualMachine::set_template_error_message(const string& name,
SingleAttribute * attr;
ostringstream error_value;
- error_value << one_util::log_time() << " : " << message;
+ error_value << one_util::log_time() << ": " << message;
attr = new SingleAttribute(name, error_value.str());
diff --git a/src/vmm/VirtualMachineManagerProtocol.cc b/src/vmm/VirtualMachineManagerProtocol.cc
index 35720e48a5..a05b5d4ea0 100644
--- a/src/vmm/VirtualMachineManagerProtocol.cc
+++ b/src/vmm/VirtualMachineManagerProtocol.cc
@@ -41,7 +41,7 @@ static void log_message(vm_msg_t* msg)
void VirtualMachineManager::log_error(VirtualMachine* vm,
const string& payload,
- const char * msg)
+ const string& msg)
{
ostringstream oss;
@@ -60,7 +60,7 @@ void VirtualMachineManager::log_error(VirtualMachine* vm,
void VirtualMachineManager::log_error(int vm_id,
const string& payload,
- const char * msg)
+ const string& msg)
{
ostringstream oss;
@@ -156,7 +156,7 @@ void VirtualMachineManager::_deploy(unique_ptr msg)
else
{
action = &LifeCycleManager::trigger_deploy_failure;
- log_error(id, msg->payload(), "Error deploying virtual machine");
+ log_error(id, msg->payload(), vm_msg_t::type_str(VMManagerMessages::DEPLOY));
}
LifeCycleManager * lcm = Nebula::instance().get_lcm();
@@ -184,7 +184,7 @@ void VirtualMachineManager::_shutdown(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error shutting down VM");
+ log_error(id, msg->payload(), vm_msg_t::type_str(VMManagerMessages::SHUTDOWN));
lcm->trigger_shutdown_failure(msg->oid());
}
@@ -213,7 +213,7 @@ void VirtualMachineManager::_reset(unique_ptr msg)
else
{
log_error(id, msg->payload(),
- "Error rebooting-hard VM, assume it's still running");
+ vm_msg_t::type_str(VMManagerMessages::RESET));
}
}
@@ -240,7 +240,7 @@ void VirtualMachineManager::_reboot(unique_ptr msg)
else
{
log_error(id, msg->payload(),
- "Error rebooting VM, assume it's still running");
+ vm_msg_t::type_str(VMManagerMessages::REBOOT));
}
}
@@ -264,7 +264,8 @@ void VirtualMachineManager::_cancel(unique_ptr msg)
}
else
{
- log_error(msg->oid(), msg->payload(), "Error canceling VM");
+ log_error(msg->oid(), msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::CANCEL));
lcm->trigger_shutdown_failure(id);
}
@@ -293,7 +294,8 @@ void VirtualMachineManager::_cleanup(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error cleaning Host");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::CLEANUP));
}
lcm->trigger_cleanup_callback(id);
@@ -326,7 +328,8 @@ void VirtualMachineManager::_save(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error saving VM state");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::SAVE));
lcm->trigger_save_failure(id);
}
@@ -352,7 +355,8 @@ void VirtualMachineManager::_restore(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error restoring VM");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::RESTORE));
lcm->trigger_deploy_failure(id);
}
@@ -378,7 +382,8 @@ void VirtualMachineManager::_migrate(unique_ptr msg)
}
else
{
- log_error(msg->oid(), msg->payload(), "Error live migrating VM");
+ log_error(msg->oid(), msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::MIGRATE));
lcm->trigger_deploy_failure(id);
}
@@ -409,7 +414,8 @@ void VirtualMachineManager::_attachdisk(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error attaching new VM Disk");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::ATTACHDISK));
lcm->trigger_attach_failure(id);
}
@@ -440,7 +446,8 @@ void VirtualMachineManager::_detachdisk(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error detaching VM Disk");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::DETACHDISK));
lcm->trigger_detach_failure(id);
}
@@ -471,7 +478,8 @@ void VirtualMachineManager::_attachnic(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error attaching new VM NIC");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::ATTACHNIC));
lcm->trigger_attach_nic_failure(id);
}
@@ -502,7 +510,8 @@ void VirtualMachineManager::_detachnic(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error detaching VM NIC");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::DETACHNIC));
lcm->trigger_detach_nic_failure(id);
}
@@ -543,7 +552,8 @@ void VirtualMachineManager::_snapshotcreate(unique_ptr msg)
}
else
{
- log_error(msg->oid(), msg->payload(), "Error creating new VM Snapshot");
+ log_error(msg->oid(), msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::SNAPSHOTCREATE));
lcm->trigger_snapshot_create_failure(id);
}
@@ -574,7 +584,8 @@ void VirtualMachineManager::_snapshotrevert(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error reverting VM Snapshot");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::SNAPSHOTREVERT));
lcm->trigger_snapshot_revert_failure(id);
}
@@ -605,7 +616,8 @@ void VirtualMachineManager::_snapshotdelete(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error deleting VM Snapshot");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::SNAPSHOTDELETE));
lcm->trigger_snapshot_delete_failure(id);
}
@@ -636,7 +648,8 @@ void VirtualMachineManager::_disksnapshotcreate(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error creating new disk snapshot");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::DISKSNAPSHOTCREATE));
lcm->trigger_disk_snapshot_failure(id);
}
@@ -667,7 +680,8 @@ void VirtualMachineManager::_disksnapshotrevert(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error reverting disk snapshot");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::DISKSNAPSHOTREVERT));
lcm->trigger_disk_snapshot_failure(id);
}
@@ -698,7 +712,8 @@ void VirtualMachineManager::_resizedisk(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error resizing disk");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::RESIZEDISK));
lcm->trigger_disk_resize_failure(id);
}
@@ -729,7 +744,8 @@ void VirtualMachineManager::_updateconf(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error updating conf for VM");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::UPDATECONF));
lcm->trigger_update_conf_failure(id);
}
@@ -788,7 +804,8 @@ void VirtualMachineManager::_updatesg(unique_ptr msg)
}
else
{
- log_error(vm.get(), msg->payload(), "Error updating security groups.");
+ log_error(vm.get(), msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::UPDATESG));
vmpool->update(vm.get());
}
@@ -830,7 +847,8 @@ void VirtualMachineManager::_resize(unique_ptr msg)
}
else
{
- log_error(id, msg->payload(), "Error resizing VM");
+ log_error(id, msg->payload(),
+ vm_msg_t::type_str(VMManagerMessages::RESIZE));
lcm->trigger_resize_failure(id);
}
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb b/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb
index d014de16a2..1c5c50c81f 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb
@@ -56,7 +56,7 @@ module VCenterDriver
# REMOVE: no need to change...
def self.get_img_name_from_path(path, vm_id, disk_id)
- # Note: This will probably fail if the basename contains '.'
+ # NOTE: This will probably fail if the basename contains '.'
"#{path.split('.').first}-#{vm_id}-#{disk_id}.vmdk"
end
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
index b624cd6a41..7b7e682f3e 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
@@ -1047,8 +1047,8 @@ module VCenterDriver
def create_vss(
name,
+ num_ports,
pnics = nil,
- num_ports = 128,
mtu = 1500,
pnics_available = nil
)
@@ -1057,6 +1057,8 @@ module VCenterDriver
hostbridge = nil
nics = []
+ num_ports = 128 if num_ports.nil?
+
if pnics
pnics = pnics.split(',')
pnics.each do |pnic|
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
index 801c8cea89..9eb540efd0 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
@@ -519,10 +519,9 @@ module VCenterDriver
# or create it if not exist
def find_or_create_folder(folder_root, name)
folder_root.childEntity.each do |child|
- if child.instance_of? RbVmomi::VIM::Folder
- if child.name == name
- return child
- end
+ if child.instance_of?(RbVmomi::VIM::Folder) &&
+ child.name == name
+ return child
end
end
@@ -577,7 +576,7 @@ module VCenterDriver
if vcenter_vm_folder_object.nil?
begin
- vcenter_vm_folder_list = vcenter_vm_folder.split("/")
+ vcenter_vm_folder_list = vcenter_vm_folder.split('/')
folder_root = dc.item.vmFolder
vcenter_vm_folder_list.each do |folder_name|
@@ -588,10 +587,12 @@ module VCenterDriver
end
rescue StandardError => e
error_message = e.message
- if VCenterDriver::CONFIG[:debug_information]
- error_message += " " + e.backtrace
- end
- raise "Cannot create Folder in vCenter: #{error_message}"
+ if VCenterDriver::CONFIG[:debug_information]
+                        error_message += ' ' + e.backtrace.join("\n")
+ end
+
+ raise 'Cannot create Folder in vCenter: '\
+ "#{error_message}"
end
end
end
diff --git a/src/vnm_mad/remotes/elastic/Elastic.rb b/src/vnm_mad/remotes/elastic/Elastic.rb
index 24ca4abd09..ba76cfff43 100644
--- a/src/vnm_mad/remotes/elastic/Elastic.rb
+++ b/src/vnm_mad/remotes/elastic/Elastic.rb
@@ -82,9 +82,11 @@ class ElasticDriver < VNMMAD::VNMDriver
next if attach_nic_id && attach_nic_id != nic[:nic_id]
ip("route del #{nic[:ip]}/32 dev #{nic[:bridge]} | true")
- ip("neighbour del proxy #{nic[:gateway]} dev #{nic[:bridge]} | true")
+ ip("neighbour del proxy #{nic[:gateway]} dev #{nic[:bridge]} " <<
+ '| true')
next if nic[:conf][:keep_empty_bridge]
+
ip("link delete #{nic[:bridge]} | true")
end
@@ -102,6 +104,7 @@ class ElasticDriver < VNMMAD::VNMDriver
attach_nic_id = @vm['TEMPLATE/NIC[ATTACH="YES"]/NIC_ID']
rc = @vm.each_nic do |nic|
next if attach_nic_id && attach_nic_id != nic[:nic_id]
+
# pass aws_allocation_id if present
opts = { :alloc_id => nic[:aws_allocation_id] }
@@ -111,7 +114,10 @@ class ElasticDriver < VNMMAD::VNMDriver
assigned << [nic[:ip], nic[:external]]
end
- assigned.each {|ip, ext| provider.unassign(ip, ext) } unless rc # rollback
+ # rollback
+ assigned.each do |ip, ext|
+ provider.unassign(ip, ext)
+ end unless rc
!rc
end
@@ -125,6 +131,7 @@ class ElasticDriver < VNMMAD::VNMDriver
attach_nic_id = @vm['TEMPLATE/NIC[ATTACH="YES"]/NIC_ID']
@vm.each_nic do |nic|
next if attach_nic_id && attach_nic_id != nic[:nic_id]
+
provider.unassign(nic[:ip], nic[:external])
end
end
@@ -160,5 +167,6 @@ class ElasticDriver < VNMMAD::VNMDriver
commands.add :ip, params
commands.run_remote(@ssh)
end
+
end
- # rubocop:enable Naming/FileName
+# rubocop:enable Naming/FileName
diff --git a/src/vnm_mad/remotes/elastic/aws_vnm.rb b/src/vnm_mad/remotes/elastic/aws_vnm.rb
index 498f7d9869..f09a26e10f 100644
--- a/src/vnm_mad/remotes/elastic/aws_vnm.rb
+++ b/src/vnm_mad/remotes/elastic/aws_vnm.rb
@@ -91,7 +91,9 @@ class AWSProvider
filter = [{ :name => 'public-ip', :values => [external] }]
aws_ip = @ec2.describe_addresses({ :filters => filter }).addresses[0]
- if aws_ip.nil? || aws_ip.network_interface_id.nil? || aws_ip.private_ip_address.nil?
+ if aws_ip.nil? \
+ || aws_ip.network_interface_id.nil? \
+ || aws_ip.private_ip_address.nil?
return
end
diff --git a/src/vnm_mad/remotes/elastic/packet_vnm.rb b/src/vnm_mad/remotes/elastic/packet_vnm.rb
index a59013f258..9554370afd 100644
--- a/src/vnm_mad/remotes/elastic/packet_vnm.rb
+++ b/src/vnm_mad/remotes/elastic/packet_vnm.rb
@@ -49,7 +49,7 @@ class PacketProvider
@deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
end
- def assign(ip, _external, opts = {})
+ def assign(ip, _external, _opts = {})
@client.assign_cidr_device("#{ip}/32", @deploy_id)
0
rescue StandardError => e
diff --git a/src/vnm_mad/remotes/elastic/pre b/src/vnm_mad/remotes/elastic/pre
index 5a57ad9703..5368cbfe19 100755
--- a/src/vnm_mad/remotes/elastic/pre
+++ b/src/vnm_mad/remotes/elastic/pre
@@ -50,7 +50,6 @@ begin
deploy_id)
drv.create_bridges
-
rescue StandardError => e
OpenNebula.log_error(e.message)
OpenNebula.log_error(e.backtrace)
diff --git a/src/vnm_mad/remotes/lib/vlan.rb b/src/vnm_mad/remotes/lib/vlan.rb
index 5e6e602a48..8abe751132 100644
--- a/src/vnm_mad/remotes/lib/vlan.rb
+++ b/src/vnm_mad/remotes/lib/vlan.rb
@@ -141,7 +141,6 @@ module VNMMAD
@nic[:vlan_dev] = "#{@nic[:phydev]}.#{@nic[:vlan_id]}"
end
-
def list_interface_vlan(_name)
nil
end