From 1f9844cd71938d9fcc77791fd7812ab919a4f9a7 Mon Sep 17 00:00:00 2001
From: Frederick Borges <fborges@opennebula.io>
Date: Tue, 12 Jan 2021 12:07:34 +0100
Subject: [PATCH 01/18] F #4994: Add VM name to VNC dialog

Signed-off-by: Frederick Borges <fborges@opennebula.io>
---
 src/sunstone/public/app/opennebula/vm.js        |  4 +++-
 src/sunstone/public/app/tabs/vms-tab/actions.js | 12 +++++++++---
 src/sunstone/public/app/utils/gclient.js        | 15 ++++++++-------
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/sunstone/public/app/opennebula/vm.js b/src/sunstone/public/app/opennebula/vm.js
index e4d94b99a2..bc72ab17bf 100644
--- a/src/sunstone/public/app/opennebula/vm.js
+++ b/src/sunstone/public/app/opennebula/vm.js
@@ -612,7 +612,8 @@ define(function(require) {
       var callback = params.success;
       var callback_error = params.error;
       var id = params.data.id;
-      var typeConnection = params.data.extra_param;
+      var typeConnection = params.data.extra_param.type;
+      var vm_name = params.data.extra_param.vm_name;
       var resource = RESOURCE;
 
       var request = OpenNebulaHelper.request(resource, null, params.data);
@@ -621,6 +622,7 @@ define(function(require) {
         type: "POST",
         dataType: "json",
         success: function(response) {
+          response.vm_name = vm_name;
           return callback ? callback(request, response) : null;
         },
         error: function(response) {
diff --git a/src/sunstone/public/app/tabs/vms-tab/actions.js b/src/sunstone/public/app/tabs/vms-tab/actions.js
index 13ec2226fb..7b830e1071 100644
--- a/src/sunstone/public/app/tabs/vms-tab/actions.js
+++ b/src/sunstone/public/app/tabs/vms-tab/actions.js
@@ -347,7 +347,9 @@ define(function(require) {
       type: "custom",
       call: function() {
         $.each(Sunstone.getDataTable(TAB_ID).elements(), function(index, elem) {
-          Sunstone.runAction("VM.startguac_action", elem, 'vnc');
+          var vm_name = OpenNebulaVM.getName(elem);
+          var extra_param = {type: 'vnc', 'vm_name': vm_name }
+          Sunstone.runAction("VM.startguac_action", elem, extra_param);
         });
       },
       error: function(req, resp) {
@@ -358,7 +360,9 @@ define(function(require) {
       type: "custom",
       call: function() {
         $.each(Sunstone.getDataTable(TAB_ID).elements(), function(index, elem) {
-          Sunstone.runAction("VM.startguac_action", elem, 'rdp');
+          var vm_name = OpenNebulaVM.getName(elem);
+          var extra_param = {type: 'rdp', 'vm_name': vm_name }
+          Sunstone.runAction("VM.startguac_action", elem, extra_param);
         });
       },
       error: function(req, resp) {
@@ -369,7 +373,9 @@ define(function(require) {
       type: "custom",
       call: function() {
         $.each(Sunstone.getDataTable(TAB_ID).elements(), function(index, elem) {
-          Sunstone.runAction("VM.startguac_action", elem, 'ssh');
+          var vm_name = OpenNebulaVM.getName(elem);
+          var extra_param = {type: 'ssh', 'vm_name': vm_name }
+          Sunstone.runAction("VM.startguac_action", elem, extra_param);
         });
       },
       error: function(req, resp) {
diff --git a/src/sunstone/public/app/utils/gclient.js b/src/sunstone/public/app/utils/gclient.js
index 1369fe6cd8..23176cac59 100644
--- a/src/sunstone/public/app/utils/gclient.js
+++ b/src/sunstone/public/app/utils/gclient.js
@@ -75,6 +75,7 @@ define(function(require) {
 
     var tunnel = new Guacamole.WebSocketTunnel(wsprotocol + '//' + host + ':' + port + '/fireedge/guacamole')
     var guac = this._client = new Guacamole.Client(tunnel);
+    var vm_name = response.vm_name || "";
 
     // Client display
     this._display = $("#guacamole-display");
@@ -94,19 +95,19 @@ define(function(require) {
     guac.onstatechange = function(state) {
       switch (state) {
         case 0:
-          setStatus("Client IDLE");
+          setStatus("Client IDLE to: " + vm_name);
           setLoading(true);
         break;
         case 1:
-          setStatus("Client CONNECTING");
+          setStatus("Client CONNECTING to: " + vm_name);
           setLoading(true);
           break;
         case 2:
-          setStatus("Client WAITING");
+          setStatus("Client WAITING to: " + vm_name);
           setLoading(true);
           break;
         case 3:
-          setStatus("Client CONNECTED");
+          setStatus("Client CONNECTED to: " + vm_name);
           setLoading(false);
           setTimeout(function() {
             rescale(that);
@@ -114,15 +115,15 @@ define(function(require) {
           }, 100);
           break;
         case 4:
-          setStatus("Client DISCONNECTING");
+          setStatus("Client DISCONNECTING to: " + vm_name);
           setLoading(true);
           break;
         case 5:
-          setStatus("Client DISCONNECTED");
+          setStatus("Client DISCONNECTED to: " + vm_name);
           setLoading(false);
           break;
         default:
-          setStatus("Client ERROR");
+          setStatus("Client ERROR to: " + vm_name);
           setLoading(false);
           break;
       }

From ce7aac40a2fd9280420c858839e91f5d39c5dcb1 Mon Sep 17 00:00:00 2001
From: "Ruben S. Montero" <rsmontero@opennebula.org>
Date: Tue, 12 Jan 2021 16:59:50 +0000
Subject: [PATCH 02/18] M #-: Update opennebula_configuration.xsd (oned.conf)

---
 share/doc/xsd/opennebula_configuration.xsd | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/share/doc/xsd/opennebula_configuration.xsd b/share/doc/xsd/opennebula_configuration.xsd
index 614eada121..76bfa11cdd 100644
--- a/share/doc/xsd/opennebula_configuration.xsd
+++ b/share/doc/xsd/opennebula_configuration.xsd
@@ -19,6 +19,7 @@
           <xs:complexType>
             <xs:all>
               <xs:element name="DRIVER_MANAGED_GROUPS" type="xs:string"/>
+              <xs:element name="DRIVER_MANAGED_GROUP_ADMIN" type="xs:string"/>
               <xs:element name="MAX_TOKEN_TIME" type="xs:integer"/>
               <xs:element name="NAME" type="xs:string"/>
               <xs:element name="PASSWORD_CHANGE" type="xs:string"/>
@@ -164,6 +165,7 @@
         </xs:element>
 
         <xs:element name="HOST_ENCRYPTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
+        <xs:element name="DOCUMENT_ENCRYPTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
         <xs:element name="IMAGE_RESTRICTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
 
         <xs:element name="IM_MAD" minOccurs="0" maxOccurs="unbounded">

From c0e482fdfa3a1c1173ca5b1e23f6a46c378ae7de Mon Sep 17 00:00:00 2001
From: Ricardo Diaz <rdiaz@opennebula.io>
Date: Wed, 13 Jan 2021 09:22:19 +0100
Subject: [PATCH 03/18] M #-: Do not provision volumes for DS in providers
 (#625)

Signed-off-by: Ricardo Diaz <rdiaz@opennebula.io>
---
 .../providers/templates/aws/datastore.erb        | 16 ++++++++--------
 .../providers/templates/packet/datastore.erb     | 16 ++++++++--------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/oneprovision/lib/terraform/providers/templates/aws/datastore.erb b/src/oneprovision/lib/terraform/providers/templates/aws/datastore.erb
index 101dbd7eb7..0b1520485c 100644
--- a/src/oneprovision/lib/terraform/providers/templates/aws/datastore.erb
+++ b/src/oneprovision/lib/terraform/providers/templates/aws/datastore.erb
@@ -1,9 +1,9 @@
-resource "aws_ebs_volume" "device_<%= obj['ID'] %>" {
-    availability_zone = "<%= provision['AWS_REGION'] %>"
-    size              = "<%= obj['TOTAL_MB'] %>"
-
-    tags = {
-        Name = "<%= obj['NAME'] %>"
-    }
-}
+#resource "aws_ebs_volume" "device_<%= obj['ID'] %>" {
+#    availability_zone = "<%= provision['AWS_REGION'] %>"
+#    size              = "<%= obj['TOTAL_MB'] %>"
+#
+#    tags = {
+#        Name = "<%= obj['NAME'] %>"
+#    }
+#}
 
diff --git a/src/oneprovision/lib/terraform/providers/templates/packet/datastore.erb b/src/oneprovision/lib/terraform/providers/templates/packet/datastore.erb
index 4b82590647..11ea8893aa 100644
--- a/src/oneprovision/lib/terraform/providers/templates/packet/datastore.erb
+++ b/src/oneprovision/lib/terraform/providers/templates/packet/datastore.erb
@@ -1,9 +1,9 @@
-resource "packet_volume" "device_<%= obj['ID'] %>" {
-    description   = "<%= obj['ID'] %>_volume"
-    facility      = "<%= provision['FACILITY'] %>"
-    project_id    = "<%= provision['PACKET_PROJECT'] %>"
-    plan          = "<%= provision['PLAN'] %>"
-    size          = "<%= obj['TOTAL_MB'] %>"
-    billing_cycle = "hourly"
-}
+#resource "packet_volume" "device_<%= obj['ID'] %>" {
+#    description   = "<%= obj['ID'] %>_volume"
+#    facility      = "<%= provision['FACILITY'] %>"
+#    project_id    = "<%= provision['PACKET_PROJECT'] %>"
+#    plan          = "<%= provision['PLAN'] %>"
+#    size          = "<%= obj['TOTAL_MB'] %>"
+#    billing_cycle = "hourly"
+#}
 

From 3187ff05f3eb52216ea4b90cde16b12d947bccb7 Mon Sep 17 00:00:00 2001
From: Jan Orel <jorel@opennebula.systems>
Date: Wed, 13 Jan 2021 09:24:50 +0100
Subject: [PATCH 04/18] M #-: Fix ssh live migration on Debian10 (#627)

---
 src/vmm_mad/remotes/kvm/migrate | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/vmm_mad/remotes/kvm/migrate b/src/vmm_mad/remotes/kvm/migrate
index ce81af4e63..5d2451d045 100755
--- a/src/vmm_mad/remotes/kvm/migrate
+++ b/src/vmm_mad/remotes/kvm/migrate
@@ -30,12 +30,11 @@ is_readonly() {
     local DOMAIN=$1
     local DISK=$2
 
-    READ_ONLY=$(awk 'gsub(/[\0]/, x)' \
-        <( virsh --connect $LIBVIRT_URI dumpxml $DOMAIN | \
+    READ_ONLY=$(virsh --connect $LIBVIRT_URI dumpxml $DOMAIN | \
             $XPATH --stdin --subtree \
-            "//domain/devices/disk[source/@file='$DISK']/readonly"))
+            "//domain/devices/disk[source/@file='$DISK']/readonly")
 
-    [ "$READ_ONLY" = '<readonly/>' ]
+    [[ "$READ_ONLY" =~ '<readonly/>' ]]
 }
 
 get_size_and_format_of_disk_img() {

From 0f5ccdd1995566a387a2ef42ebb440306f83a216 Mon Sep 17 00:00:00 2001
From: Jan Orel <jorel@opennebula.systems>
Date: Wed, 13 Jan 2021 09:29:41 +0100
Subject: [PATCH 05/18] M #-: Allow empty provider.inputs (#624)

---
 src/oneprovision/lib/provision/provision.rb | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/oneprovision/lib/provision/provision.rb b/src/oneprovision/lib/provision/provision.rb
index 8845f50414..e6010001af 100644
--- a/src/oneprovision/lib/provision/provision.rb
+++ b/src/oneprovision/lib/provision/provision.rb
@@ -248,7 +248,11 @@ module OneProvision
             end
 
             @provider     = provider
-            cfg['inputs'] = cfg['inputs'] | provider.inputs
+            if cfg['inputs'].nil?
+                cfg['inputs'] = provider.inputs
+            else
+                cfg['inputs'] << provider.inputs unless provider.inputs.nil?
+            end
 
             cfg.validate(false)
 

From de74a4d597aea7368328898c666fee5778e72d15 Mon Sep 17 00:00:00 2001
From: Tino Vazquez <cvazquez@opennebula.io>
Date: Wed, 13 Jan 2021 10:04:15 +0100
Subject: [PATCH 06/18] M #-: Fix typo

(cherry picked from commit 87c67c99f9efb1ea3973a7147471e156d0d5f585)
---
 src/sunstone/public/app/utils/fireedge-validator.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sunstone/public/app/utils/fireedge-validator.js b/src/sunstone/public/app/utils/fireedge-validator.js
index 38c5d70eee..2de3a45c15 100644
--- a/src/sunstone/public/app/utils/fireedge-validator.js
+++ b/src/sunstone/public/app/utils/fireedge-validator.js
@@ -109,7 +109,7 @@ define(function (require) {
                     }
                 },
                 error: function(request, response, data) {
-                    Notifier.onError(request, {error:{ message: "Fireedge public endpoint is not working, please contact you cloud administrator"}});
+                    Notifier.onError(request, {error:{ message: "Fireedge public endpoint is not working, please contact your cloud administrator"}});
                     is_fireedge_configured = false;
                     if (typeof error == "function"){
                         error();

From a04be0b5c39524d5f5ae2ed74ab79b984d85b975 Mon Sep 17 00:00:00 2001
From: Tino Vazquez <cvazquez@opennebula.io>
Date: Wed, 13 Jan 2021 10:01:57 +0100
Subject: [PATCH 07/18] M #-: Fix typo

(cherry picked from commit 0e007026577277e045e30e9ea717ed18fc3b456a)
---
 src/sunstone/public/app/utils/fireedge-validator.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sunstone/public/app/utils/fireedge-validator.js b/src/sunstone/public/app/utils/fireedge-validator.js
index 2de3a45c15..042e4ac12b 100644
--- a/src/sunstone/public/app/utils/fireedge-validator.js
+++ b/src/sunstone/public/app/utils/fireedge-validator.js
@@ -76,7 +76,7 @@ define(function (require) {
                     }
                 },
                 error: function(request, response, data) {
-                    Notifier.onError(request, {error:{ message: "Fireedge private endpoint is not working, please contact you cloud administrator"}});
+                    Notifier.onError(request, {error:{ message: "Fireedge private endpoint is not working, please contact your cloud administrator"}});
                     is_fireedge_configured = false;
                     clear_fireedge_token();
                     if (typeof error == "function"){

From 56b58e2ecccb347becbfc90e68cc5c4e481442bd Mon Sep 17 00:00:00 2001
From: "Ruben S. Montero" <rsmontero@opennebula.org>
Date: Wed, 13 Jan 2021 09:59:17 +0000
Subject: [PATCH 08/18] M #-: Hybrid+ add vnet instances to cluster

---
 share/oneprovision/hybrid+/provisions/aws.d/networks.yml    | 1 +
 share/oneprovision/hybrid+/provisions/packet.d/networks.yml | 1 +
 2 files changed, 2 insertions(+)

diff --git a/share/oneprovision/hybrid+/provisions/aws.d/networks.yml b/share/oneprovision/hybrid+/provisions/aws.d/networks.yml
index 2b2e42df4a..0252545a4b 100644
--- a/share/oneprovision/hybrid+/provisions/aws.d/networks.yml
+++ b/share/oneprovision/hybrid+/provisions/aws.d/networks.yml
@@ -31,3 +31,4 @@ vntemplates:
     vn_mad: 'vxlan'
     phydev: 'eth0'
     automatic_vlan_id: 'yes'
+    cluster_ids: "${cluster.0.id}"
diff --git a/share/oneprovision/hybrid+/provisions/packet.d/networks.yml b/share/oneprovision/hybrid+/provisions/packet.d/networks.yml
index 18c031c336..63633a744e 100644
--- a/share/oneprovision/hybrid+/provisions/packet.d/networks.yml
+++ b/share/oneprovision/hybrid+/provisions/packet.d/networks.yml
@@ -30,3 +30,4 @@ vntemplates:
     vn_mad: 'vxlan'
     phydev: 'bond0'
     automatic_vlan_id: 'yes'
+    cluster_ids: "${cluster.0.id}"

From d08101193819af6313ea77a200678cc344368b07 Mon Sep 17 00:00:00 2001
From: Alejandro Huertas Herrero <ahuertas@opennebula.systems>
Date: Wed, 13 Jan 2021 13:48:54 +0100
Subject: [PATCH 09/18] M #-: fix minor bug in providers (#631)

---
 src/oneprovision/lib/provider/provider.rb     |  4 +---
 src/oneprovision/lib/provision/provision.rb   | 19 +++++++++++--------
 .../provision/resources/physical/cluster.rb   |  6 +++---
 3 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/src/oneprovision/lib/provider/provider.rb b/src/oneprovision/lib/provider/provider.rb
index 4626110c13..694c61d097 100644
--- a/src/oneprovision/lib/provider/provider.rb
+++ b/src/oneprovision/lib/provider/provider.rb
@@ -85,9 +85,7 @@ module OneProvision
             return rc if OpenNebula.is_error?(rc)
 
             pool.each do |p|
-                next unless p.body['provider'] == body['provider']
-
-                next if p.body['state'] == Provision::STATE['DONE']
+                next unless p.body['provider'] == self['NAME']
 
                 return OpenNebula::Error.new(
                     'Provider can not be deleted, it is used by ' \
diff --git a/src/oneprovision/lib/provision/provision.rb b/src/oneprovision/lib/provision/provision.rb
index e6010001af..0ce9d73353 100644
--- a/src/oneprovision/lib/provision/provision.rb
+++ b/src/oneprovision/lib/provision/provision.rb
@@ -112,19 +112,22 @@ module OneProvision
 
         # Get cluster information
         def cluster
-            return nil unless infrastructure_objects
+            return unless infrastructure_objects
+
             infrastructure_objects['clusters'][0]
         end
 
         # Returns provision hosts
         def hosts
-            return nil unless infrastructure_objects
+            return unless infrastructure_objects
+
             infrastructure_objects['hosts']
         end
 
         # Returns provision datastores
         def datastores
-            return nil unless infrastructure_objects
+            return unless infrastructure_objects
+
             infrastructure_objects['datastores']
         end
 
@@ -247,7 +250,8 @@ module OneProvision
                 return OpenNebula::Error.new('No provider found')
             end
 
-            @provider     = provider
+            @provider = provider
+
             if cfg['inputs'].nil?
                 cfg['inputs'] = provider.inputs
             else
@@ -538,7 +542,7 @@ module OneProvision
 
             OneProvisionLogger.debug(msg)
 
-            datastores = cfg['cluster'].delete("datastores")
+            datastores = cfg['cluster'].delete('datastores')
 
             obj = Cluster.new(nil, cfg['cluster'])
 
@@ -546,7 +550,7 @@ module OneProvision
 
             id = obj.create
 
-            datastores.each { |i| obj.adddatastore(i) } if datastores
+            datastores.each {|i| obj.adddatastore(i) } if datastores
 
             infrastructure_objects['clusters'] = []
             infrastructure_objects['clusters'] << { 'id'   => id,
@@ -570,8 +574,7 @@ module OneProvision
                 cfg[r].each do |x|
                     Driver.retry_loop('Failed to create some resources',
                                       self) do
-
-                        x['provision'] = {'id' => @id }
+                        x['provision'] = { 'id' => @id }
                         obj            = Resource.object(r, nil, x)
 
                         next if obj.nil?
diff --git a/src/oneprovision/lib/provision/resources/physical/cluster.rb b/src/oneprovision/lib/provision/resources/physical/cluster.rb
index 9b8fa4106d..d72da25e24 100644
--- a/src/oneprovision/lib/provision/resources/physical/cluster.rb
+++ b/src/oneprovision/lib/provision/resources/physical/cluster.rb
@@ -79,9 +79,9 @@ module OneProvision
             end
 
             # Remove non-provision elements added to the cluster
-            @one.datastore_ids.each { |i| @one.deldatastore(i) }
-            @one.vnet_ids.each { |i| @one.delvnet(i) }
-            @one.host_ids.each { |i| @one.delhost(i) }
+            @one.datastore_ids.each {|i| @one.deldatastore(i) }
+            @one.vnet_ids.each {|i| @one.delvnet(i) }
+            @one.host_ids.each {|i| @one.delhost(i) }
 
             Utils.exception(@one.delete)
 

From c5d7998ed348c48dcd1ec8aa07216272c023dfcb Mon Sep 17 00:00:00 2001
From: "Ruben S. Montero" <rsmontero@opennebula.org>
Date: Wed, 13 Jan 2021 15:15:52 +0100
Subject: [PATCH 10/18] M #-: Fix PROVISION_ID reference

---
 src/vnm_mad/remotes/elastic/Elastic.rb | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/vnm_mad/remotes/elastic/Elastic.rb b/src/vnm_mad/remotes/elastic/Elastic.rb
index 46f856dda0..24ca4abd09 100644
--- a/src/vnm_mad/remotes/elastic/Elastic.rb
+++ b/src/vnm_mad/remotes/elastic/Elastic.rb
@@ -44,12 +44,12 @@ class ElasticDriver < VNMMAD::VNMDriver
 
         raise rc if OpenNebula.is_error?(rc)
 
-        unless @host.has_elements?('TEMPLATE/PROVISION_ID')
-            OpenNebula.log_error("No PROVISION_ID for host #{host_id}")
+        unless @host.has_elements?('TEMPLATE/PROVISION/ID')
+            OpenNebula.log_error("No ID in PROVISION for host #{host_id}")
             exit 1
         end
 
-        provision_id = @host['TEMPLATE/PROVISION_ID']
+        provision_id = @host['TEMPLATE/PROVISION/ID']
         provision = OneProvision::Provision.new_with_id(provision_id, client)
         provision.info
 
@@ -161,4 +161,4 @@ class ElasticDriver < VNMMAD::VNMDriver
         commands.run_remote(@ssh)
     end
 end
-# rubocop:enable Naming/FileName
+    # rubocop:enable Naming/FileName

From 2586034a3168a8274b74171588206dc34fa028b9 Mon Sep 17 00:00:00 2001
From: Alejandro Huertas Herrero <ahuertas@opennebula.systems>
Date: Wed, 13 Jan 2021 18:15:21 +0100
Subject: [PATCH 11/18] M #-: make provider name immutable (#634)

---
 src/oneprovision/lib/provider/provider.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/oneprovision/lib/provider/provider.rb b/src/oneprovision/lib/provider/provider.rb
index 694c61d097..6820c1f880 100644
--- a/src/oneprovision/lib/provider/provider.rb
+++ b/src/oneprovision/lib/provider/provider.rb
@@ -22,7 +22,7 @@ module OneProvision
         DOCUMENT_TYPE = 102
 
         # These attributes can not be changed when updating the provider
-        IMMUTABLE_ATTRS = %w[provider]
+        IMMUTABLE_ATTRS = %w[provider name]
 
         # Allocates a new document
         #

From 4559bb5226ad66e83488dfbf2a3e7e385d6dacb9 Mon Sep 17 00:00:00 2001
From: Alejandro Huertas Herrero <ahuertas@opennebula.systems>
Date: Wed, 13 Jan 2021 18:15:49 +0100
Subject: [PATCH 12/18] B #5224: fix CLI output when no terminal (#637)

---
 src/cli/one_helper.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb
index dd87091c70..20cacd1899 100644
--- a/src/cli/one_helper.rb
+++ b/src/cli/one_helper.rb
@@ -705,7 +705,7 @@ EOT
             else
                 rc = pool.info
 
-                return rc if OpenNebula.is_error?(rc)
+                return -1, rc.message if OpenNebula.is_error?(rc)
 
                 _, hash = print_page(pool, options)
 

From 5d7ae36c682a19f6023770898d6ffccaf82cfd20 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Petr=20Ospal=C3=BD?= <pospaly@opennebula.io>
Date: Wed, 13 Jan 2021 19:09:49 +0100
Subject: [PATCH 13/18] F OpenNebula/one#5178: Add time to supervisord log
 (#635)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Petr Ospalý <pospaly@opennebula.io>
---
 .../pkgs/services/supervisor/centos8/scripts/lib/functions.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/share/pkgs/services/supervisor/centos8/scripts/lib/functions.sh b/share/pkgs/services/supervisor/centos8/scripts/lib/functions.sh
index bd27bd11b9..b942fd34c2 100644
--- a/share/pkgs/services/supervisor/centos8/scripts/lib/functions.sh
+++ b/share/pkgs/services/supervisor/centos8/scripts/lib/functions.sh
@@ -4,12 +4,12 @@
 
 msg()
 (
-    echo "[SUPERVISOR]: ${SUPERVISOR_PROCESS_NAME}: $*"
+    echo "$(date '+%F %T') [SUPERVISOR]: ${SUPERVISOR_PROCESS_NAME}: $*"
 )
 
 err()
 (
-    echo "[SUPERVISOR] [!] ERROR: ${SUPERVISOR_PROCESS_NAME}: $*"
+    echo "$(date '+%F %T') [SUPERVISOR] [!] ERROR: ${SUPERVISOR_PROCESS_NAME}: $*"
 )
 
 is_running()

From 0c01395b46f86e802912f6451f8c3cacecae1bed Mon Sep 17 00:00:00 2001
From: "Carlos J. Herrera" <cherrera@opennebula.io>
Date: Thu, 14 Jan 2021 06:42:48 -0500
Subject: [PATCH 14/18] L #~: Linting vcenter driver (#600)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Tino Vázquez <cvazquez@opennebula.io>
---
 share/hooks/vcenter/create_vcenter_net.rb     |    4 +-
 share/hooks/vcenter/delete_vcenter_net.rb     |   12 +-
 share/linters/.rubocop.yml                    |   10 -
 src/cli/one_helper/onevcenter_helper.rb       |  314 +--
 .../remotes/vcenter_downloader.rb             |   24 +-
 src/datastore_mad/remotes/vcenter_uploader.rb |   25 +-
 src/im_mad/remotes/lib/vcenter_cluster.rb     |  191 +-
 src/im_mad/remotes/lib/vcenter_monitor_vms.rb |   38 +-
 src/sunstone/opennebula_vmrc.rb               |    2 +-
 .../remotes/lib/vcenter_driver/host.rb        |    2 +-
 .../lib/vcenter_driver/virtual_machine.rb     | 2220 +++++++++++------
 .../remotes/lib/vcenter_driver/vm_template.rb |  645 +++--
 .../lib/vcenter_driver/vmm_importer.rb        |   22 +-
 13 files changed, 2187 insertions(+), 1322 deletions(-)

diff --git a/share/hooks/vcenter/create_vcenter_net.rb b/share/hooks/vcenter/create_vcenter_net.rb
index 54405acdbd..e4f9e4d916 100755
--- a/share/hooks/vcenter/create_vcenter_net.rb
+++ b/share/hooks/vcenter/create_vcenter_net.rb
@@ -47,7 +47,9 @@ require 'nsx_driver'
 
 # Exceptions
 class AllocateNetworkError < StandardError; end
+
 class CreateNetworkError < StandardError; end
+
 class UpdateNetworkError < StandardError; end
 
 # FUNCTIONS
@@ -370,7 +372,7 @@ begin
     ccr_ref  = one_host['TEMPLATE/VCENTER_CCR_REF']
     cluster  = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref,
                                                                   vi_client)
-    dc       = cluster.get_dc
+    dc       = cluster.datacenter
 
     # Step 3. Create the port groups based on each type
     if pg_type == VCenterDriver::Network::NETWORK_TYPE_NSXV
diff --git a/share/hooks/vcenter/delete_vcenter_net.rb b/share/hooks/vcenter/delete_vcenter_net.rb
index 91c9f3db7f..0e8390c755 100755
--- a/share/hooks/vcenter/delete_vcenter_net.rb
+++ b/share/hooks/vcenter/delete_vcenter_net.rb
@@ -45,6 +45,7 @@ require 'nsx_driver'
 
 # Exceptions
 class DeleteNetworkError < StandardError; end
+
 class DeletePortgroupError < StandardError; end
 
 # FUNCTIONS
@@ -74,7 +75,7 @@ end
 
 vnet_xml = arguments_xml.xpath(VNET_XPATH).to_s
 
-template    = OpenNebula::XMLElement.new
+template = OpenNebula::XMLElement.new
 template.initialize_xml(vnet_xml, 'VNET')
 managed  = template['TEMPLATE/OPENNEBULA_MANAGED'] != 'NO'
 imported = template['TEMPLATE/VCENTER_IMPORTED']
@@ -109,7 +110,7 @@ begin
     ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
     cluster = VCenterDriver::ClusterComputeResource
               .new_from_ref(ccr_ref, vi_client)
-    dc = cluster.get_dc
+    dc = cluster.datacenter
 
     # NSX
     ls_id = template['TEMPLATE/NSX_ID']
@@ -148,7 +149,7 @@ begin
         cluster['host'].each do |host|
             # Step 3. Loop through hosts in clusters
             esx_host = VCenterDriver::ESXHost
-                        .new_from_ref(host._ref, vi_client)
+                       .new_from_ref(host._ref, vi_client)
 
             begin
                 esx_host.lock # Exclusive lock for ESX host operation
@@ -179,7 +180,7 @@ begin
         begin
             nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
             logical_switch = NSXDriver::VirtualWire
-                              .new(nsx_client, ls_id, nil, nil)
+                             .new(nsx_client, ls_id, nil, nil)
             logical_switch.delete_logical_switch
         rescue StandardError => e
             err_msg = e.message
@@ -191,14 +192,13 @@ begin
         begin
             nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
             logical_switch = NSXDriver::OpaqueNetwork
-                              .new(nsx_client, ls_id, nil, nil)
+                             .new(nsx_client, ls_id, nil, nil)
             logical_switch.delete_logical_switch
         rescue StandardError => e
             err_msg = e.message
             raise DeletePortgroupError, err_msg
         end
     end
-
 rescue DeleteNetworkError => e
     STDERR.puts e.message
     STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
diff --git a/share/linters/.rubocop.yml b/share/linters/.rubocop.yml
index 37b5db0c49..25341ef7e4 100644
--- a/share/linters/.rubocop.yml
+++ b/share/linters/.rubocop.yml
@@ -82,14 +82,11 @@ AllCops:
     - src/im_mad/remotes/kvm-probes.d/pci.rb
     - src/im_mad/remotes/kvm.d/monitord-client.rb
     - src/im_mad/remotes/lxd.d/monitord-client.rb
-    - src/im_mad/remotes/vcenter.d/poll
     - src/im_mad/remotes/packet.d/poll
     - src/im_mad/remotes/ec2.d/poll
     - src/im_mad/remotes/one.d/poll
     - src/im_mad/remotes/az.d/poll
     - src/im_mad/remotes/lib/vcenter_cluster.rb
-    - src/im_mad/remotes/lib/vcenter_monitor.rb
-    - src/im_mad/remotes/lib/vcenter_monitor_vms.rb
     - src/vnm_mad/remotes/ovswitch/post
     - src/vnm_mad/remotes/ovswitch/clean
     - src/vnm_mad/remotes/ovswitch/pre
@@ -183,8 +180,6 @@ AllCops:
     - src/cli/one
     - share/scons/po2json.rb
     - share/sudoers/sudo_commands.rb
-    - share/hooks/vcenter/delete_vcenter_net.rb
-    - share/hooks/vcenter/create_vcenter_net.rb
     - share/hooks/ft/host_error.rb
     - share/instance_types/ec2-instance-types.rb
     - share/instance_types/az-instance-types.rb
@@ -274,8 +269,6 @@ AllCops:
     - src/sunstone/routes/vcenter.rb
     - src/sunstone/models/OpenNebula2FA/SunstoneWebAuthn.rb
     - src/onegate/onegate-server.rb
-    - src/datastore_mad/remotes/vcenter_downloader.rb
-    - src/datastore_mad/remotes/vcenter_uploader.rb
     - src/datastore_mad/remotes/xpath.rb
     - src/datastore_mad/remotes/url.rb
     - src/datastore_mad/one_datastore.rb
@@ -315,7 +308,6 @@ AllCops:
     - src/cli/one_helper/oneuser_helper.rb
     - src/cli/one_helper/onegroup_helper.rb
     - src/cli/one_helper/onevnet_helper.rb
-    - src/cli/one_helper/onevcenter_helper.rb
     - src/cli/one_helper/onecluster_helper.rb
     - src/cli/one_helper/onevntemplate_helper.rb
     - src/cli/one_helper/onevrouter_helper.rb
@@ -386,8 +378,6 @@ AllCops:
     - src/oca/ruby/deprecated/OpenNebula.rb
     - src/vmm_mad/dummy/one_vmm_dummy.rb
     - src/vmm_mad/remotes/one/opennebula_driver.rb
-    - src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
-    - src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
     - src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb
     - src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb
     - src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb
diff --git a/src/cli/one_helper/onevcenter_helper.rb b/src/cli/one_helper/onevcenter_helper.rb
index 970e9fdf36..958e42ec3e 100644
--- a/src/cli/one_helper/onevcenter_helper.rb
+++ b/src/cli/one_helper/onevcenter_helper.rb
@@ -1,4 +1,3 @@
-
 # -------------------------------------------------------------------------- #
 # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems                #
 #                                                                            #
@@ -17,16 +16,21 @@
 
 require 'one_helper'
 
+##############################################################################
+# Module OneVcenterHelper
+##############################################################################
 class OneVcenterHelper < OpenNebulaHelper::OneHelper
 
     #
     # vCenter importer will divide rvmomi resources
     # in this group, makes parsing easier.
     module VOBJECT
-      DATASTORE = 1
-      TEMPLATE = 2
-      NETWORK = 3
-      IMAGE = 4
+
+        DATASTORE = 1
+        TEMPLATE = 2
+        NETWORK = 3
+        IMAGE = 4
+
     end
 
     #
@@ -37,45 +41,53 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
     # struct:   [Array] LIST FORMAT for opennebula cli
     #           related methods: * cli_format
     #
-    # columns:  [Hash(column => Integer)] Will be used in the list command, Integer represent nbytes
+    # columns:  [Hash(column => Integer)] Will be used in the list command,
+    #           Integer represent nbytes
     #           related methods: * format_list
     #
-    # cli:      [Array] with mandatory args, for example image listing needs a datastore
+    # cli:      [Array] with mandatory args, for example image
+    #           listing needs a datastore
     #           related methods: * parse_opts
     #
-    # dialogue: [Lambda] Used only for Vobject that require a previous dialogue with the user, will be triggered
+    # dialogue: [Lambda] Used only for Vobject that require a previous
+    #                    dialogue with the user, will be triggered
     #                    on importation process
     #           related methods: * network_dialogue
     #                            * template_dialogue
     #
     TABLE = {
         VOBJECT::DATASTORE => {
-            :struct  => ["DATASTORE_LIST", "DATASTORE"],
-            :columns => {:IMID => 5, :REF => 15, :NAME => 50, :CLUSTERS => 10},
+            :struct  => %w[DATASTORE_LIST DATASTORE],
+            :columns =>
+                { :IMID => 5, :REF => 15, :NAME => 50, :CLUSTERS => 10 },
             :cli     => [:host],
-            :dialogue => ->(arg){}
+            :dialogue => ->(arg) {}
         },
         VOBJECT::TEMPLATE => {
-            :struct  => ["TEMPLATE_LIST", "TEMPLATE"],
-            :columns => {:IMID => 5, :REF => 10, :NAME => 50},
+            :struct  => %w[TEMPLATE_LIST TEMPLATE],
+            :columns => { :IMID => 5, :REF => 10, :NAME => 50 },
             :cli     => [:host],
-            :dialogue => ->(arg){ OneVcenterHelper.template_dialogue(arg) }
+            :dialogue => ->(arg) { OneVcenterHelper.template_dialogue(arg) }
         },
         VOBJECT::NETWORK => {
-            :struct  => ["NETWORK_LIST", "NETWORK"],
-            :columns => {:IMID => 5, :REF => 15, :NAME => 30, :CLUSTERS => 20},
+            :struct  => %w[NETWORK_LIST NETWORK],
+            :columns => {
+                :IMID => 5,
+                :REF => 15,
+                :NAME => 30,
+                :CLUSTERS => 20
+            },
             :cli     => [:host],
-            :dialogue => ->(arg){ OneVcenterHelper.network_dialogue(arg) }
+            :dialogue => ->(arg) { OneVcenterHelper.network_dialogue(arg) }
         },
         VOBJECT::IMAGE => {
-            :struct  => ["IMAGE_LIST", "IMAGE"],
-            :columns => {:IMID => 5,:REF => 35, :PATH => 60},
+            :struct  => %w[IMAGE_LIST IMAGE],
+            :columns => { :IMID => 5, :REF => 35, :PATH => 60 },
             :cli     => [:host, :datastore],
-            :dialogue => ->(arg){}
+            :dialogue => ->(arg) {}
         }
     }
 
-
     ################################################################
     # CLI ARGS
     ################################################################
@@ -83,28 +95,27 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
     # these methods will be used by table :cli property
     # the purpose is to inject code when -d option in this case is used
     #
-	# @param arg [String] The parameter passed to the option:w
+    # @param arg [String] The parameter passed to the option:w
     #
 
     def datastore(arg)
         ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, arg)
 
         {
-            ds_ref: ds['TEMPLATE/VCENTER_DS_REF'],
-            one_item: ds
+            :ds_ref => ds['TEMPLATE/VCENTER_DS_REF'],
+            :one_item => ds
         }
     end
 
     def host(arg)
-        return arg
+        arg
     end
 
     ########################
 
-
     # In list command you can use this method to print a header
     #
-	# @param vcenter_host [String] this text will be displayed
+    # @param vcenter_host [String] this text will be displayed
     #
     def show_header(vcenter_host)
         CLIHelper.scr_bold
@@ -112,33 +123,33 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
         puts "# vCenter: #{vcenter_host}".ljust(50)
         CLIHelper.scr_restore
         puts
-
     end
 
     # Using for parse a String into a VOBJECT
     # We will use VOBJECT instances for handle any operatiion
     #
-	# @param type [String] String representing the vCenter resource
+    # @param type [String] String representing the vCenter resource
     #
-    def set_object(type)
-        raise "you need to use -o option!" unless type
+    def object_update(type)
+        raise 'you need to use -o option!' unless type
 
         type = type.downcase
-        if (type == "datastores")
+        case type
+        when 'datastores'
             @vobject = VOBJECT::DATASTORE
-        elsif (type == "templates")
+        when 'templates'
             @vobject = VOBJECT::TEMPLATE
-        elsif (type =="networks")
+        when 'networks'
             @vobject = VOBJECT::NETWORK
-        elsif (type == "images")
+        when 'images'
             @vobject = VOBJECT::IMAGE
         else
             puts "unknown #{type} type option"
-            puts "  -o options:"
-            puts "      datastores"
-            puts "      templates"
-            puts "      networks"
-            puts "      images"
+            puts '  -o options:'
+            puts '      datastores'
+            puts '      templates'
+            puts '      networks'
+            puts '      images'
 
             exit 0
         end
@@ -146,11 +157,11 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
 
     # Handles connection to vCenter.
     #
-	# @param options [Hash] options for the connection
+    # @param options [Hash] options for the connection
     #
     def connection_options(object_name, options)
         if  options[:vuser].nil? || options[:vcenter].nil?
-            raise "vCenter connection parameters are mandatory to import"\
+            raise 'vCenter connection parameters are mandatory to import'\
                   " #{object_name}:\n"\
                   "\t --vcenter vCenter hostname\n"\
                   "\t --vuser username to login in vcenter"
@@ -158,23 +169,29 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
 
         password = options[:vpass] || OpenNebulaHelper::OneHelper.get_password
         {
-           :user     => options[:vuser],
-           :password => password,
-           :host     => options[:vcenter],
-           :port     => options[:port]
+            :user     => options[:vuser],
+            :password => password,
+            :host     => options[:vcenter],
+            :port     => options[:port]
         }
     end
 
-    def cli_format( hash)
-        {TABLE[@vobject][:struct].first => {TABLE[@vobject][:struct].last => hash.values}}
+    def cli_format(hash)
+        {
+            TABLE[@vobject][:struct].first =>
+                {
+                    TABLE[@vobject][:struct].last =>
+                        hash.values
+                }
+        }
     end
 
     # This method will print a list for a vcenter_resource.
     #
-    def list_object(options, list)
+    def list_object(_options, list)
         vcenter_host = list.keys[0]
         list = cli_format(list.values.first)
-        table = format_list()
+        table = format_list
 
         show_header(vcenter_host)
 
@@ -184,11 +201,12 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
     # handles :cli section of TABLE
     # used for executing the dialogue in some VOBJECTS
     #
-    # @param object_info [Hash] This is the object with all the info related to the object
+    # @param object_info [Hash] This is the object
+    #                           with all the info related to the object
     #                           that will be imported
     #
     def cli_dialogue(object_info)
-        return TABLE[@vobject][:dialogue].(object_info)
+        TABLE[@vobject][:dialogue].call(object_info)
     end
 
     # This method iterates over the possible options for certain resources
@@ -197,17 +215,18 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
     # @param opts [Hash] options object passed to the onecenter tool
     #
     def parse_opts(opts)
-        set_object(opts[:object])
+        object_update(opts[:object])
 
         res = {}
         TABLE[@vobject][:cli].each do |arg|
             raise "#{arg} it's mandadory for this op" if opts[arg].nil?
-            res[arg] = self.method(arg).call(opts[arg])
+
+            res[arg] = method(arg).call(opts[arg])
         end
 
         res[:config] = parse_file(opts[:configuration]) if opts[:configuration]
 
-        return res
+        res
     end
 
     # This method will parse a yaml
@@ -218,8 +237,8 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
     #
     def parse_file(path)
         begin
-            config = YAML::load(File.read(path))
-        rescue Exception => e
+            _config = YAML.safe_load(File.read(path))
+        rescue StandardError => _e
             str_error="Unable to read '#{path}'. Invalid YAML syntax:\n"
 
             raise str_error
@@ -230,42 +249,44 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
     # with the purpose of build a complete CLI list
     # OpenNebula way
     #
-    def format_list()
+    def format_list
         config = TABLE[@vobject][:columns]
-        table = CLIHelper::ShowTable.new() do
-            column :IMID, "identifier for ...", :size=>config[:IMID] || 4 do |d|
+        CLIHelper::ShowTable.new do
+            column :IMID, 'identifier for ...', :size=>config[:IMID] || 4 do |d|
                 d[:import_id]
             end
 
-            column :REF, "ref", :left, :adjust, :size=>config[:REF] || 15 do |d|
+            column :REF, 'ref', :left, :adjust, :size=>config[:REF] || 15 do |d|
                 d[:ref]
             end
 
-            column :NAME, "Name", :left, :expand, :size=>config[:NAME] || 20 do |d|
+            column :NAME, 'Name', :left, :expand,
+                   :size=>config[:NAME] || 20 do |d|
                 d[:name] || d[:simple_name]
             end
 
-            column :CLUSTERS, "CLUSTERS", :left, :size=>config[:CLUSTERS] || 10 do |d|
+            column :CLUSTERS, 'CLUSTERS', :left,
+                   :size=>config[:CLUSTERS] || 10 do |d|
                 d = d[:clusters] if d[:clusters]
                 d[:one_ids] || d[:cluster].to_s
             end
 
-            column :PATH, "PATH", :left, :expand, :size=>config[:PATH] || 10 do |d|
+            column :PATH, 'PATH', :left, :expand,
+                   :size=>config[:PATH] || 10 do |d|
                 d[:path]
             end
 
             default(*config.keys)
         end
-
-        table
     end
 
     ################################################################
     # CLI DIALOGUES
     ################################################################
     def self.template_dialogue(t)
-        rps_list = -> {
-            return "" if t[:rp_list].empty?
+        rps_list = lambda {
+            return '' if t[:rp_list].empty?
+
             puts
             t[:rp_list].each do |rp|
                 puts "      #{rp[:name]}"
@@ -277,91 +298,96 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
 
         # default opts
         opts = {
-            linked_clone: '0',
-            copy: '0',
-            name: '',
-            folder: '',
-            resourcepool: [],
-            type: ''
+            :linked_clone => '0',
+            :copy => '0',
+            :name => '',
+            :folder => '',
+            :resourcepool => [],
+            :type => ''
         }
 
         STDOUT.print "\n- Template: \e[92m#{t[:template_name]}\e[39m\n\n"\
 
         # LINKED CLONE OPTION
         STDOUT.print "\n    For faster deployment operations"\
-                     " and lower disk usage, OpenNebula"\
-                     " can create new VMs as linked clones."\
-                     "\n    Would you like to use Linked Clones with VMs based on this template (y/[n])? "
+                     ' and lower disk usage, OpenNebula'\
+                     ' can create new VMs as linked clones.'\
+                     "\n    Would you like to use Linked Clones"\
+                     ' with VMs based on this template (y/[n])? '
 
         if STDIN.gets.strip.downcase == 'y'
             opts[:linked_clone] = '1'
 
-
             # CREATE COPY OPTION
             STDOUT.print "\n    Linked clones requires that delta"\
-                         " disks must be created for each disk in the template."\
-                         " This operation may change the template contents."\
-                         " \n    Do you want OpenNebula to create a copy of the template,"\
-                         " so the original template remains untouched ([y]/n)? "
+                         ' disks must be created for '\
+                         'each disk in the template.'\
+                         ' This operation may change the template contents.'\
+                         " \n    Do you want OpenNebula to "\
+                         'create a copy of the template,'\
+                         ' so the original template remains untouched ([y]/n)? '
 
             if STDIN.gets.strip.downcase != 'n'
                 opts[:copy] = '1'
 
                 # NAME OPTION
                 STDOUT.print "\n    The new template will be named"\
-                             " adding a one- prefix to the name"\
+                             ' adding a one- prefix to the name'\
                              " of the original template. \n"\
-                             "    If you prefer a different name"\
-                             " please specify or press Enter"\
-                             " to use defaults: "
+                             '    If you prefer a different name'\
+                             ' please specify or press Enter'\
+                             ' to use defaults: '
 
                 template_name = STDIN.gets.strip.downcase
                 opts[:name] = template_name
 
-                STDOUT.print "\n    WARNING!!! The cloning operation can take some time"\
+                STDOUT.print "\n    WARNING!!! The cloning "\
+                             'operation can take some time'\
                              " depending on the size of disks.\n"
             end
         end
 
-        STDOUT.print "\n\n    Do you want to specify a folder where"\
-                        " the deployed VMs based on this template will appear"\
-                        " in vSphere's VM and Templates section?"\
-                        "\n    If no path is set, VMs will be placed in the same"\
-                        " location where the template lives."\
-                        "\n    Please specify a path using slashes to separate folders"\
-                        " e.g /Management/VMs or press Enter to use defaults: "\
+        stdout_print = "\n\n    Do you want to specify a folder where"\
+        ' the deployed VMs based on this template will appear'\
+        " in vSphere's VM and Templates section?"\
+        "\n    If no path is set, VMs will be placed in the same"\
+        ' location where the template lives.'\
+        "\n    Please specify a path using slashes to separate folders"\
+        ' e.g /Management/VMs or press Enter to use defaults: '\
+
+        STDOUT.print stdout_print
 
         vcenter_vm_folder = STDIN.gets.strip
         opts[:folder] = vcenter_vm_folder
 
         STDOUT.print "\n\n    This template is currently set to "\
-            "launch VMs in the default resource pool."\
+            'launch VMs in the default resource pool.'\
             "\n    Press y to keep this behaviour, n to select"\
-            " a new resource pool or d to delegate the choice"\
-            " to the user ([y]/n/d)? "
+            ' a new resource pool or d to delegate the choice'\
+            ' to the user ([y]/n/d)? '
 
-        answer =  STDIN.gets.strip.downcase
+        answer = STDIN.gets.strip.downcase
 
         case answer.downcase
         when 'd' || 'delegate'
             opts[:type]='list'
             puts "separate with commas ',' the list that you want to deleate:"
 
-            opts[:resourcepool] = rps_list.call.gsub(/\s+/, "").split(",")
+            opts[:resourcepool] = rps_list.call.gsub(/\s+/, '').split(',')
 
         when 'n' || 'no'
             opts[:type]='fixed'
-            puts "choose the proper name"
+            puts 'choose the proper name'
             opts[:resourcepool] = rps_list.call
         else
             opts[:type]='default'
         end
 
-        return opts
+        opts
     end
 
     def self.network_dialogue(n)
-        ask = ->(question, default = ""){
+        ask = lambda {|question, default = ''|
             STDOUT.print question
             answer = STDIN.gets.strip
 
@@ -372,59 +398,63 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
 
         STDOUT.print "\n- Network: \e[92m#{n[:name]}\e[39m\n\n"\
 
-        opts = { size: "255", type: "ether" }
+        opts = { :size => '255', :type => 'ether' }
 
-		question =  "    How many VMs are you planning"\
-					" to fit into this network [255]? "
-        opts[:size] = ask.call(question, "255")
+        question =  '    How many VMs are you planning'\
+                    ' to fit into this network [255]? '
+        opts[:size] = ask.call(question, '255')
 
-		question = "    What type of Virtual Network"\
-				   " do you want to create (IPv[4],IPv[6], [E]thernet)? "
-        type_answer = ask.call(question, "ether")
+        question = '    What type of Virtual Network'\
+                   ' do you want to create (IPv[4],IPv[6], [E]thernet)? '
+        type_answer = ask.call(question, 'ether')
 
-        supported_types = ["4","6","ether", "e", "ip4", "ip6" ]
+        supported_types = %w[4 6 ether e ip4 ip6]
         if !supported_types.include?(type_answer)
-            type_answer =  'e'
-			STDOUT.puts "    Type [#{type_answer}] not supported,"\
-						" defaulting to Ethernet."
+            type_answer = 'e'
+            STDOUT.puts "    Type [#{type_answer}] not supported,"\
+                        ' defaulting to Ethernet.'
         end
-        question_ip  = "    Please input the first IP in the range: "
-        question_mac = "    Please input the first MAC in the range [Enter for default]: "
+        question_ip =
+            '    Please input the first IP in the range: '
+        question_mac =
+            '    Please input the first MAC in the range [Enter for default]: '
 
         case type_answer.downcase
-		when "4", "ip4"
-			opts[:ip]  = ask.call(question_ip)
-			opts[:mac] = ask.call(question_mac)
-            opts[:type] = "ip"
-		when "6", "ip6"
-			opts[:mac] = ask.call(question_mac)
+        when '4', 'ip4'
+            opts[:ip]  = ask.call(question_ip)
+            opts[:mac] = ask.call(question_mac)
+            opts[:type] = 'ip'
+        when '6', 'ip6'
+            opts[:mac] = ask.call(question_mac)
 
-			question =   "    Do you want to use SLAAC "\
-						 "Stateless Address Autoconfiguration? ([y]/n): "
+            question =   '    Do you want to use SLAAC '\
+                         'Stateless Address Autoconfiguration? ([y]/n): '
             slaac_answer = ask.call(question, 'y').downcase
 
-			if slaac_answer == 'n'
-				question =  "    Please input the IPv6 address (cannot be empty): "
-				opts[:ip6] = ask.call(question)
+            if slaac_answer == 'n'
+                question =
+                    '    Please input the IPv6 address (cannot be empty): '
+                opts[:ip6] = ask.call(question)
 
-				question =  "    Please input the Prefix length (cannot be empty): "
-				opts[:prefix_length] = ask.call(question)
+                question =
+                    '    Please input the Prefix length (cannot be empty): '
+                opts[:prefix_length] = ask.call(question)
                 opts[:type] = 'ip6_static'
-			else
-				question =  "    Please input the GLOBAL PREFIX "\
-							"[Enter for default]: "
-				opts[:global_prefix] = ask.call(question)
+            else
+                question = '    Please input the GLOBAL PREFIX '\
+                           '[Enter for default]: '
+                opts[:global_prefix] = ask.call(question)
 
-				question=  "    Please input the ULA PREFIX "\
-						   "[Enter for default]: "
-				opts[:ula_prefix] = ask.call(question)
-                opts[:type]       = 'ip6'
-			end
-		when "e", "ether"
-			opts[:mac] = ask.call(question_mac)
-		end
+                question = '    Please input the ULA PREFIX '\
+                           '[Enter for default]: '
+                opts[:ula_prefix] = ask.call(question)
+                opts[:type] = 'ip6'
+            end
+        when 'e', 'ether'
+            opts[:mac] = ask.call(question_mac)
+        end
 
-        return opts
+        opts
     end
 
     def clear_tags(vmid)
@@ -469,7 +499,7 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
             end
         end
 
-        return vm, keys_to_remove
+        [vm, keys_to_remove]
     end
 
     def remove_keys(vm, keys_to_remove)
diff --git a/src/datastore_mad/remotes/vcenter_downloader.rb b/src/datastore_mad/remotes/vcenter_downloader.rb
index f558764384..c684386d92 100755
--- a/src/datastore_mad/remotes/vcenter_downloader.rb
+++ b/src/datastore_mad/remotes/vcenter_downloader.rb
@@ -19,13 +19,13 @@
 ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
 
 if !ONE_LOCATION
-    RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
-    GEMS_LOCATION     = '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
-    VAR_LOCATION      = '/var/lib/one' unless defined?(VAR_LOCATION)
+    RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
+    GEMS_LOCATION     ||= '/usr/share/one/gems'
+    VAR_LOCATION      ||= '/var/lib/one'
 else
-    RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION)
-    GEMS_LOCATION     = ONE_LOCATION + '/share/gems' unless defined?(GEMS_LOCATION)
-    VAR_LOCATION      = ONE_LOCATION + '/var' unless defined?(VAR_LOCATION)
+    RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
+    GEMS_LOCATION     ||= ONE_LOCATION + '/share/gems'
+    VAR_LOCATION      ||= ONE_LOCATION + '/var'
 end
 
 if File.directory?(GEMS_LOCATION)
@@ -46,7 +46,7 @@ require 'addressable'
 vcenter_url     = Addressable::URI.parse(ARGV[0])
 
 params          = CGI.parse(vcenter_url.query)
-ds_id           = params["param_dsid"][0]
+ds_id           = params['param_dsid'][0]
 
 begin
     vi_client = VCenterDriver::VIClient.new_from_datastore(ds_id)
@@ -57,9 +57,13 @@ begin
     ds = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
 
     VCenterDriver::FileHelper.dump_vmdk_tar_gz(vcenter_url, ds)
-rescue Exception => e
-    STDERR.puts "Cannot download image #{vcenter_url.path} from datastore #{ds_id} "\
-                "Reason: \"#{e.message}\"\n#{e.backtrace}"
+rescue StandardError => e
+    STDERR.puts "Cannot download image #{vcenter_url.path}"\
+                " from datastore #{ds_id} "\
+                "Reason: \"#{e.message}\""
+    if VCenterDriver::CONFIG[:debug_information]
+        STDERR.puts "#{e.backtrace}"
+    end
     exit(-1)
 ensure
     vi_client.close_connection if vi_client
diff --git a/src/datastore_mad/remotes/vcenter_uploader.rb b/src/datastore_mad/remotes/vcenter_uploader.rb
index 9fb2508fea..ed081fc021 100755
--- a/src/datastore_mad/remotes/vcenter_uploader.rb
+++ b/src/datastore_mad/remotes/vcenter_uploader.rb
@@ -19,11 +19,15 @@
 ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
 
 if !ONE_LOCATION
-    RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
-    GEMS_LOCATION     = '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
+    RUBY_LIB_LOCATION =
+        '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
+    GEMS_LOCATION =
+        '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
 else
-    RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION)
-    GEMS_LOCATION     = ONE_LOCATION + '/share/gems' unless defined?(GEMS_LOCATION)
+    RUBY_LIB_LOCATION =
+        ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION)
+    GEMS_LOCATION =
+        ONE_LOCATION + '/share/gems' unless defined?(GEMS_LOCATION)
 end
 
 if File.directory?(GEMS_LOCATION)
@@ -47,7 +51,7 @@ begin
     ds = VCenterDriver::Datastore.new_from_ref(target_ds_ref, vi_client)
 
     # Setting "." as the source will read from the stdin
-    source_path = "." if source_path.nil?
+    source_path = '.' if source_path.nil?
 
     ds.create_directory(File.dirname(target_path))
 
@@ -56,12 +60,13 @@ begin
     end
 
     puts target_path
-
-rescue Exception => e
+rescue StandardError => e
     STDERR.puts "Cannot upload image to datastore #{ds_id} "\
-                "Reason: \"#{e.message}\"\n#{e.backtrace}"
-    exit -1
+                "Reason: \"#{e.message}\""
+    if VCenterDriver::CONFIG[:debug_information]
+        STDERR.puts "#{e.backtrace}"
+    end
+    exit(-1)
 ensure
     vi_client.close_connection if vi_client
 end
-
diff --git a/src/im_mad/remotes/lib/vcenter_cluster.rb b/src/im_mad/remotes/lib/vcenter_cluster.rb
index 556847225e..bc4565f52b 100644
--- a/src/im_mad/remotes/lib/vcenter_cluster.rb
+++ b/src/im_mad/remotes/lib/vcenter_cluster.rb
@@ -29,13 +29,13 @@ end
 # Logger
 ################################################################################
 $logger = Logger.new(
-            STDERR,
-            level: Logger::INFO,
-            datetime_format: '%Y-%m-%d %H:%M:%S',
-            formatter: proc { |severity, datetime, progname, msg|
-                        "#{datetime} [#{severity}]: #{msg}\n"
-            }
-        )
+    STDERR,
+    :level => Logger::INFO,
+    :datetime_format => '%Y-%m-%d %H:%M:%S',
+    :formatter => proc {|severity, datetime, _progname, msg|
+        "#{datetime} [#{severity}]: #{msg}\n"
+    }
+)
 
 #-------------------------------------------------------------------------------
 #  Set of vcenter clusters each one representing a opennebula host
@@ -48,8 +48,9 @@ $logger = Logger.new(
 #    @last_monitor_vm:   Timer for last monitor VM
 #-------------------------------------------------------------------------------
 class Cluster
+
     #---------------------------------------------------------------------------
-    #Constants
+    # Constants
     #  CLUSTER_PROPERTIES: ESX cluster properties
     #  RP_PROPERTIES: Resource pool properties
     #  VM_STATE_PROPERTIES: Properties for VM state changes
@@ -172,21 +173,21 @@ class Cluster
     def state_vm
         current_vm_states = vcenter_vms_state
 
-        # Check if we need a full sync
+        # Check if we need a full sync
         full_sync = false
         now = Time.now.to_i
-        if @last_sync.nil? or ((now - @last_sync) > VM_SYNC_TIME)
+        if @last_sync.nil? || ((now - @last_sync) > VM_SYNC_TIME)
             full_sync = true
             @last_sync = now
         end
 
-        str_info = ""
+        str_info = ''
         str_info << "SYNC_STATE=yes\nMISSING_STATE=#{VM_MISSING_STATE}\n" if full_sync
 
-        current_vm_states.each do |_,vm|
+        current_vm_states.each do |_, vm|
             vm_ref = vm[:deploy_id]
 
-            if full_sync or need_state_sync?(vm_ref, vm[:state])
+            if full_sync || need_state_sync?(vm_ref, vm[:state])
                 str_info << "VM = [ ID=\"#{vm[:id]}\", "
                 str_info << "DEPLOY_ID=\"#{vm[:deploy_id]}\", STATE=\"#{vm[:state]}\" ]\n"
             end
@@ -202,40 +203,46 @@ class Cluster
         view = @vic.vim
                    .serviceContent
                    .viewManager
-                   .CreateContainerView({
-                        container: @cluster.item,
-                        type:      ['VirtualMachine'],
-                        recursive: true
-                   })
+                   .CreateContainerView(
+                       {
+                           :container => @cluster.item,
+                           :type => ['VirtualMachine'],
+                           :recursive => true
+                       }
+                   )
 
         pc   = @vic.vim.serviceContent.propertyCollector
 
-        result = pc.RetrieveProperties(:specSet => [
-            RbVmomi::VIM.PropertyFilterSpec(
-                :objectSet => [
-                    :obj => view,
-                    :skip => true,
-                    :selectSet => [
-                        RbVmomi::VIM.TraversalSpec(
-                            :name => 'traverseEntities',
-                            :type => 'ContainerView',
-                            :path => 'view',
-                            :skip => false
-                        )
+        result = pc.RetrieveProperties(
+            :specSet => [
+                RbVmomi::VIM.PropertyFilterSpec(
+                    :objectSet => [
+                        :obj => view,
+                        :skip => true,
+                        :selectSet => [
+                            RbVmomi::VIM.TraversalSpec(
+                                :name => 'traverseEntities',
+                                :type => 'ContainerView',
+                                :path => 'view',
+                                :skip => false
+                            )
+                        ]
+                    ],
+                    :propSet => [
+                        {
+                            :type    => 'VirtualMachine',
+                            :pathSet => VM_STATE_PROPERTIES
+                        }
                     ]
-                ],
-
-                :propSet => [{
-                    :type    => 'VirtualMachine',
-                    :pathSet => VM_STATE_PROPERTIES
-                }]
-            )
-        ])
+                )
+            ]
+        )
 
         vms_hash = {}
 
         result.each do |r|
             next unless r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
+
             vms_hash[r.obj._ref] = r.to_hash
         end
 
@@ -251,17 +258,17 @@ class Cluster
             one_id = -1
             ids    = vmpool.retrieve_xmlelements("/VM_POOL/VM[DEPLOY_ID = '#{vm_ref}']")
 
-            ids.select {|vm|
-                hid = vm["HISTORY_RECORDS/HISTORY/HID"]
+            ids.select do |vm|
+                hid = vm['HISTORY_RECORDS/HISTORY/HID']
 
                 if hid
                     hid.to_i == @host_id
                 else
                     false
                 end
-            }
+            end
 
-            one_id = ids[0]["ID"] if ids[0]
+            one_id = ids[0]['ID'] if ids[0]
             next if one_id.to_i == -1
 
             vms[vm_ref] = {
@@ -313,7 +320,7 @@ class Cluster
 
         resource_usage_summary = @cluster.item.GetResourceUsage()
 
-        real_total_cpu     = resource_usage_summary.cpuCapacityMHz.to_f
+        real_total_cpu = resource_usage_summary.cpuCapacityMHz.to_f
         real_used_cpu = resource_usage_summary.cpuUsedMHz.to_f
         total_memory  = resource_usage_summary.memCapacityMB.to_i
         used_mem      = resource_usage_summary.memUsedMB.to_i
@@ -328,11 +335,11 @@ class Cluster
 
         free_cpu = total_cpu - used_cpu
 
-        free_mem  = total_memory - used_mem
+        free_mem = total_memory - used_mem
 
         unindent(<<-EOS)
             HYPERVISOR = vcenter
-            USEDMEMORY = "#{(used_mem * 1024)}"
+            USEDMEMORY = "#{used_mem * 1024}"
             FREEMEMORY = "#{free_mem}"
             USEDCPU    = "#{used_cpu.to_i}"
             FREECPU    = "#{free_cpu.to_i}"
@@ -388,33 +395,38 @@ class Cluster
     def resource_pool_info(mhz_core)
         rp_list = @cluster.get_resource_pool_list
 
-        view = @vic.vim.serviceContent.viewManager.CreateContainerView({
-            container: @cluster.item,
-            type:      ['ResourcePool'],
-            recursive: true
-        })
+        view =
+            @vic.vim.serviceContent.viewManager.CreateContainerView(
+                {
+                    :container => @cluster.item,
+                    :type => ['ResourcePool'],
+                    :recursive => true
+                }
+            )
 
         pc     = @vic.vim.serviceContent.propertyCollector
-        result = pc.RetrieveProperties(:specSet => [
-            RbVmomi::VIM.PropertyFilterSpec(
-                :objectSet => [
-                    :obj       => view,
-                    :skip      => true,
-                    :selectSet => [
-                        RbVmomi::VIM.TraversalSpec(
-                            :name => 'traverseEntities',
-                            :type => 'ContainerView',
-                            :path => 'view',
-                            :skip => false
-                        )
-                    ]
-                ],
-                :propSet => [{
-                    :type    => 'ResourcePool',
-                    :pathSet => RP_PROPERTIES
-                }]
-            )
-        ])
+        result = pc.RetrieveProperties(
+            :specSet => [
+                RbVmomi::VIM.PropertyFilterSpec(
+                    :objectSet => [
+                        :obj       => view,
+                        :skip      => true,
+                        :selectSet => [
+                            RbVmomi::VIM.TraversalSpec(
+                                :name => 'traverseEntities',
+                                :type => 'ContainerView',
+                                :path => 'view',
+                                :skip => false
+                            )
+                        ]
+                    ],
+                    :propSet => [{
+                        :type    => 'ResourcePool',
+                        :pathSet => RP_PROPERTIES
+                    }]
+                )
+            ]
+        )
 
         rps = {}
 
@@ -476,10 +488,10 @@ class Cluster
             mem_shares       = info['config.memoryAllocation.shares.shares']
 
             begin
-                rp_name = rp_list.select { |item|
+                rp_name = rp_list.select do |item|
                     item[:ref] == ref
-                }.first[:name]
-            rescue
+                end.first[:name]
+            rescue StandardError
                 rp_name = 'Resources'
             end
 
@@ -585,9 +597,9 @@ class Cluster
         elist.each do |ext_list|
             case ext_list.key
             when NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
-                parts = ext_list.client[0].url.split("/")
+                parts = ext_list.client[0].url.split('/')
 
-                protocol = parts[0] + "//"
+                protocol = parts[0] + '//'
                 ip_port  = parts[2]
 
                 @nsx_obj['type']    = NSXDriver::NSXConstants::NSXV
@@ -619,12 +631,12 @@ class Cluster
     # Get a list vCenter datastores morefs
     #---------------------------------------------------------------------------
     def datastore_info
-        dc = @cluster.get_dc
+        dc = @cluster.datacenter
         ds = dc.datastore_folder
 
         ds_info = ''
 
-        ds.fetch!.each do |ref, ds|
+        ds.fetch!.each do |ref, _ds|
             ds_info << "VCENTER_DS_REF=\"#{ref}\"\n"
         end
 
@@ -661,9 +673,9 @@ class Cluster
 
         if create_nsx_client
             @nsx_client = NSXDriver::NSXClient.new_child(nsx_manager,
-                                                        nsx_user,
-                                                        nsx_password,
-                                                        nsx_type)
+                                                         nsx_user,
+                                                         nsx_password,
+                                                         nsx_type)
         end
 
         return '' if @nsx_client.nil?
@@ -689,7 +701,7 @@ class Cluster
             end
 
         else
-          raise "Unknown PortGroup type #{nsx_type}"
+            raise "Unknown PortGroup type #{nsx_type}"
         end
 
         nsx_info.chomp!(',')
@@ -713,6 +725,7 @@ end
 #
 #---------------------------------------------------------------------------
 class ClusterSet
+
     #---------------------------------------------------------------------------
     #  Constants
     #    CLUSTER_PROBES: to be executed. Each Cluster needs to respond to this
@@ -758,9 +771,9 @@ class ClusterSet
 
     # Del a host from the @cluster hash
     def del(hid)
-        @mutex.synchronize {
+        @mutex.synchronize do
             @clusters.delete(hid)
-        }
+        end
 
         $logger.info("Unregistered host #{hid}")
     end
@@ -778,7 +791,7 @@ class ClusterSet
         rc    = hpool.info
 
         if OpenNebula.is_error?(rc)
-            # Wait 5 seconds and retry
+            # Wait 5 seconds and retry
             sleep 5
             rc = hpool.info
             if OpenNebula.is_error?(rc)
@@ -786,10 +799,10 @@ class ClusterSet
             end
         end
 
-        $logger.info("Bootstraping list of clusters")
+        $logger.info('Bootstrapping list of clusters')
 
         hpool.each do |h|
-            next if h['IM_MAD'] != 'vcenter' || h['STATE'] == '8' #offline
+            next if h['IM_MAD'] != 'vcenter' || h['STATE'] == '8' # offline
 
             $logger.info("Adding host #{h.name} (#{h.id})")
 
@@ -810,7 +823,7 @@ class ClusterSet
                 next if c[:cluster].nil?
 
                 if c[:monitordc].nil?
-                    next if conf[:address].nil? or conf[:port].nil?
+                    next if conf[:address].nil? || conf[:port].nil?
 
                     c[:monitordc] = MonitorClient.new(conf[:address],
                                                       conf[:port],
@@ -827,7 +840,8 @@ class ClusterSet
                         probe_frequency = conf[probe_name].to_i
                         next unless (Time.now.to_i - last_mon) > probe_frequency
 
-                        # Refresh the vCenter connection in the least frequent probe
+                        # Refresh the vCenter connection
+                        # in the least frequent probe
                         if probe_name.eql?(:system_host)
                             c[:cluster].connect_vcenter
                         end
@@ -872,4 +886,5 @@ class ClusterSet
             }
         end
     end
+
 end
diff --git a/src/im_mad/remotes/lib/vcenter_monitor_vms.rb b/src/im_mad/remotes/lib/vcenter_monitor_vms.rb
index e9d35da8f4..1a443fa574 100755
--- a/src/im_mad/remotes/lib/vcenter_monitor_vms.rb
+++ b/src/im_mad/remotes/lib/vcenter_monitor_vms.rb
@@ -19,17 +19,17 @@
 ONE_LOCATION ||= ENV['ONE_LOCATION']
 
 if !ONE_LOCATION
-  RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
-  GEMS_LOCATION     ||= '/usr/share/one/gems'
+    RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
+    GEMS_LOCATION     ||= '/usr/share/one/gems'
 else
-  RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
-  GEMS_LOCATION     ||= ONE_LOCATION + '/share/gems'
+    RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
+    GEMS_LOCATION     ||= ONE_LOCATION + '/share/gems'
 end
 
 if File.directory?(GEMS_LOCATION)
-  $LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
-  require 'rubygems'
-  Gem.use_paths(File.realpath(GEMS_LOCATION))
+    $LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
+    require 'rubygems'
+    Gem.use_paths(File.realpath(GEMS_LOCATION))
 end
 
 $LOAD_PATH << RUBY_LIB_LOCATION
@@ -44,22 +44,22 @@ vm_type = ARGV[1]
 ccr     = ARGV[2]
 
 begin
-  vi_client = VCenterDriver::VIClient.new_from_host(host_id)
+    vi_client = VCenterDriver::VIClient.new_from_host(host_id)
 
-  cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr, vi_client)
+    cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr, vi_client)
 
-  str_info , _ltime = cluster.monitor_vms(host_id, vm_type)
+    str_info, _ltime = cluster.monitor_vms(host_id, vm_type)
 
-  puts str_info
+    puts str_info
 rescue StandardError => e
-  message =  "Monitoring of VMs on vCenter cluster #{host_id} " \
-               " failed due to \"#{e.message}\"."
-  OpenNebula.log_error(message)
-  if VCenterDriver::CONFIG[:debug_information]
-    STDERR.puts "#{message} #{e.backtrace}"
-  end
+    message = "Monitoring of VMs on vCenter cluster #{host_id} " \
+              "failed due to \"#{e.message}\"."
+    OpenNebula.log_error(message)
+    if VCenterDriver::CONFIG[:debug_information]
+        STDERR.puts "#{message} #{e.backtrace}"
+    end
 
-  exit(-1)
+    exit(-1)
 ensure
-  vi_client.close_connection if vi_client
+    vi_client.close_connection if vi_client
 end
diff --git a/src/sunstone/opennebula_vmrc.rb b/src/sunstone/opennebula_vmrc.rb
index 96a404c6a9..9d9096b349 100644
--- a/src/sunstone/opennebula_vmrc.rb
+++ b/src/sunstone/opennebula_vmrc.rb
@@ -161,7 +161,7 @@ class OpenNebulaVMRC
 
         vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vm_id)
 
-        parameters = vm.get_html_console_parameters
+        parameters = vm.html_console_parameters
 
         data = {
             :host   => parameters[:host],
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
index 6a3a0ed6d0..b624cd6a41 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
@@ -890,7 +890,7 @@ module VCenterDriver
             text
         end
 
-        def get_dc # rubocop:disable Naming/AccessorMethodName
+        def datacenter # rubocop:disable Naming/AccessorMethodName
             item = @item
 
             until item.instance_of? RbVmomi::VIM::Datacenter
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
index 193dd6be0d..f7fe3b736e 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
@@ -45,6 +45,9 @@ module VCenterDriver
     require 'vm_helper'
     require 'vm_monitor'
 
+    ############################################################################
+    # Class VirtualMachine
+    ############################################################################
     class VirtualMachine < VCenterDriver::Template
 
         # Supported access to VirtualMachineDevice classes:
@@ -56,20 +59,25 @@ module VCenterDriver
         include VirtualMachineHelper
         include VirtualMachineMonitor
 
-        ############################################################################
+        ########################################################################
         # Virtual Machine main Class
-        ############################################################################
+        ########################################################################
 
-        VM_PREFIX_DEFAULT = "one-$i-"
+        VM_PREFIX_DEFAULT = 'one-$i-'
 
-        POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
-        VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE
+        POLL_ATTRIBUTE =
+            OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
+        VM_STATE =
+            OpenNebula::VirtualMachine::Driver::VM_STATE
 
-        DNET_CARD         = RbVmomi::VIM::VirtualEthernetCardDistributedVirtualPortBackingInfo
-        NET_CARD          = RbVmomi::VIM::VirtualEthernetCardNetworkBackingInfo
-        OPAQUE_CARD       = RbVmomi::VIM::VirtualEthernetCardOpaqueNetworkBackingInfo
+        DNET_CARD =
+            RbVmomi::VIM::VirtualEthernetCardDistributedVirtualPortBackingInfo
+        NET_CARD =
+            RbVmomi::VIM::VirtualEthernetCardNetworkBackingInfo
+        OPAQUE_CARD =
+            RbVmomi::VIM::VirtualEthernetCardOpaqueNetworkBackingInfo
 
-        VM_SHUTDOWN_TIMEOUT = 600 #10 minutes til poweroff hard
+        VM_SHUTDOWN_TIMEOUT = 600 # 10 minutes til poweroff hard
 
         attr_accessor :item, :vm_id
 
@@ -78,21 +86,23 @@ module VCenterDriver
         include Memoize
 
         def initialize(vi_client, ref, one_id)
-            if (ref)
+            if ref
                 @item = RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref)
                 check_item(@item, RbVmomi::VIM::VirtualMachine)
             end
 
+            super(@item, vi_client)
+
             @vi_client = vi_client
             @vm_id     = one_id
             @locking   = true
             @vm_info   = nil
             @disks     = {}
-            @nics = {macs: {}}
+            @nics = { :macs => {} }
         end
 
-        ############################################################################
-        ############################################################################
+        ########################################################################
+        ########################################################################
 
         # Attributes that must be defined when the VM does not exist in vCenter
         attr_accessor :vi_client
@@ -103,26 +113,32 @@ module VCenterDriver
         attr_writer :host
         attr_writer :target_ds_ref
 
-        ############################################################################
-        ############################################################################
+        ########################################################################
+        ########################################################################
 
         # The OpenNebula VM
         # @return OpenNebula::VirtualMachine or XMLElement
         def one_item
-            if !@one_item
-                if @vm_id != -1
-                    @one_item = VIHelper.one_item(OpenNebula::VirtualMachine, @vm_id)
-                else
-                    raise "VCenterDriver::Virtualmachine: OpenNebula ID is mandatory for this vm!"
+            unless @one_item
+
+                if @vm_id == -1
+                    raise 'VCenterDriver::Virtualmachine: '\
+                          'OpenNebula ID is mandatory for this vm!'
                 end
+
+                @one_item =
+                    VIHelper
+                    .one_item(
+                        OpenNebula::VirtualMachine,
+                        @vm_id
+                    )
             end
 
             @one_item
         end
 
-
         # set the vmware item directly to the vm
-        def set_item(item)
+        def item_update(item)
             @item = item
         end
 
@@ -141,7 +157,7 @@ module VCenterDriver
                 img_path = "#{folder}/#{file}-#{@vm_id}-#{disk_id}#{ext}"
             end
 
-            return img_path
+            img_path
         end
 
         # The OpenNebula host
@@ -150,11 +166,11 @@ module VCenterDriver
             if @host.nil?
                 if one_item.nil?
                     raise "'one_item' must be previously set to be able to " <<
-                          "access the OpenNebula host."
+                          'access the OpenNebula host.'
                 end
 
-                host_id = one_item["HISTORY_RECORDS/HISTORY[last()]/HID"]
-                raise "No valid host_id found." if host_id.nil?
+                host_id = one_item['HISTORY_RECORDS/HISTORY[last()]/HID']
+                raise 'No valid host_id found.' if host_id.nil?
 
                 @host = VIHelper.one_item(OpenNebula::Host, host_id)
             end
@@ -168,14 +184,18 @@ module VCenterDriver
             if @target_ds_ref.nil?
                 if one_item.nil?
                     raise "'one_item' must be previously set to be able to " <<
-                          "access the target Datastore."
+                          'access the target Datastore.'
                 end
 
-                target_ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
-                raise "No valid target_ds_id found." if target_ds_id.nil?
+                target_ds_id = one_item['HISTORY_RECORDS/HISTORY[last()]/DS_ID']
+                raise 'No valid target_ds_id found.' if target_ds_id.nil?
 
-                target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore,
-                                                            target_ds_id)
+                target_ds =
+                    VCenterDriver::VIHelper
+                    .one_item(
+                        OpenNebula::Datastore,
+                        target_ds_id
+                    )
 
                 @target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
             end
@@ -183,67 +203,91 @@ module VCenterDriver
             @target_ds_ref
         end
 
-
         # Get a recommendation from a provided storagepod
         # Returns the recommended datastore reference
         def recommended_ds(ds_ref)
             # Fail if datastore is not a storage pod
-            raise "Cannot recommend from a non storagepod reference" if !ds_ref.start_with?('group-')
+            unless ds_ref.start_with?('group-')
+                raise 'Cannot recommend from a non storagepod reference'
+            end
 
             # Retrieve information needed to create storage_spec hash
-            storage_manager = vi_client.vim.serviceContent.storageResourceManager
-            vcenter_name = get_vcenter_name
-            vc_template = RbVmomi::VIM::VirtualMachine.new(vi_client.vim, get_template_ref)
-            dc = cluster.get_dc
-            vcenter_vm_folder_object = vcenter_folder(vcenter_folder_ref, vc_template, dc)
-			storpod = get_ds(ds_ref)
-            disk_move_type = calculate_disk_move_type(storpod, vc_template, linked_clones)
+            storage_manager =
+                vi_client
+                .vim
+                .serviceContent
+                .storageResourceManager
+            vcenter_name = vc_name
+            vc_template =
+                RbVmomi::VIM::VirtualMachine
+                .new(
+                    vi_client.vim,
+                    get_template_ref
+                )
+            dc = cluster.datacenter
+            vcenter_vm_folder_object = vcenter_folder(vcenter_folder_ref,
+                                                      vc_template, dc)
+            storpod = get_ds(ds_ref)
+            disk_move_type = calculate_disk_move_type(storpod, vc_template,
+                                                      linked_clones)
             spec_hash = spec_hash_clone(disk_move_type)
             clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)
 
             # Create hash needed to get the recommendation
             storage_spec = RbVmomi::VIM.StoragePlacementSpec(
-                type: 'clone',
-                cloneName: vcenter_name,
-                folder: vcenter_vm_folder_object,
-                podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
-                vm: vc_template,
-                cloneSpec: clone_spec
+                :type => 'clone',
+                :cloneName => vcenter_name,
+                :folder => vcenter_vm_folder_object,
+                :podSelectionSpec =>
+                    RbVmomi::VIM
+                        .StorageDrsPodSelectionSpec(
+                            :storagePod => storpod
+                        ),
+                :vm => vc_template,
+                :cloneSpec => clone_spec
             )
 
             # Query a storage placement recommendation
             result = storage_manager
-                       .RecommendDatastores(storageSpec: storage_spec) rescue nil
-            raise "Could not get placement specification for StoragePod" if result.nil?
-            if !result.respond_to?(:recommendations) || result.recommendations.size == 0
-                raise "Could not get placement specification for StoragePod"
+                     .RecommendDatastores(
+                         :storageSpec => storage_spec
+                     ) rescue nil
+            if result.nil?
+                raise 'Could not get placement specification for StoragePod'
+            end
+
+            if !result.respond_to?(:recommendations) ||
+                result.recommendations.empty?
+                raise 'Could not get placement specification for StoragePod'
             end
 
             # Return recommended DS reference
             result.recommendations.first.action.first.destination._ref
-		end
+        end
 
         # Cached cluster
         # @return ClusterComputeResource
         def cluster
             if @cluster.nil?
                 ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
-                @cluster = ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
+                @cluster = ClusterComputeResource.new_from_ref(ccr_ref,
+                                                               vi_client)
             end
 
             @cluster
         end
 
-        ############################################################################
-        ############################################################################
+        ########################################################################
+        ########################################################################
 
         # @return Boolean whether the VM exists in vCenter
         def new?
-            one_item["DEPLOY_ID"].empty?
+            one_item['DEPLOY_ID'].empty?
         end
 
         def wild?
-            !!(one_item['TEMPLATE/IMPORTED'] && one_item['TEMPLATE/IMPORTED'] == 'YES')
+            !(one_item['TEMPLATE/IMPORTED'] &&
+                one_item['TEMPLATE/IMPORTED'] == 'YES').nil?
         end
 
         # @return Boolean wheter the vm exists in OpenNebula
@@ -258,70 +302,77 @@ module VCenterDriver
             end
 
             vm_ref = self['_ref']
-            return nil if !vm_ref
+            return unless vm_ref
 
-            vc_uuid = get_vcenter_instance_uuid
+            vc_uuid = vcenter_instance_uuid
 
-            one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
-                                                        "DEPLOY_ID",
-                                                        vm_ref,
-                                                        vc_uuid,
-                                                        vm_pool)
-            return nil if !one_vm
+            one_vm =
+                VCenterDriver::VIHelper
+                .find_by_ref(
+                    OpenNebula::VirtualMachinePool,
+                    'DEPLOY_ID',
+                    vm_ref,
+                    vc_uuid,
+                    vm_pool
+                )
+            return unless one_vm
 
-            @vm_id = one_vm["ID"]
-            return @vm_id
+            @vm_id = one_vm['ID']
+            @vm_id
         end
 
-        def get_vcenter_instance_uuid
+        def vcenter_instance_uuid
             @vi_client.vim.serviceContent.about.instanceUuid
         end
 
-        def disk_keys_get
+        def disk_keys
             unmanaged_keys = {}
             @item.config.extraConfig.each do |val|
-                u = val[:key].include?("opennebula.disk")
-                m = val[:key].include?("opennebula.mdisk")
+                u = val[:key].include?('opennebula.disk')
+                m = val[:key].include?('opennebula.mdisk')
                 unmanaged_keys[val[:key]] = val[:value] if u || m
             end
 
-            return unmanaged_keys
+            unmanaged_keys
         end
 
-        ############################################################################
+        ########################################################################
         # Getters
-        ############################################################################
+        ########################################################################
 
         # @return RbVmomi::VIM::ResourcePool
-        def get_rp
-
+        def resource_pool
             req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
                     one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']
 
-            #Get ref for req_rp
+            # Get ref for req_rp
             rp_list    = cluster.get_resource_pool_list
-            req_rp_ref = rp_list.select { |rp| rp[:name].downcase == req_rp.downcase }.first[:ref] rescue nil
-
+            req_rp_ref = rp_list.select do |rp|
+                             rp[:name].downcase == req_rp.downcase
+                         end.first[:ref] rescue nil
 
             if vi_client.rp_confined?
                 if req_rp_ref && req_rp_ref != vi_client.rp._ref
-                    raise "Available resource pool [#{vi_client.rp.name}] in host"\
-                          " does not match requested resource pool"\
+                    raise 'Available resource pool '\
+                          "[#{vi_client.rp.name}] in host"\
+                          ' does not match requested resource pool'\
                           " [#{req_rp}]"
                 end
 
-                return vi_client.rp
+                vi_client.rp
             else
                 if req_rp_ref
-                    rps = cluster.resource_pools.select{|r| r._ref == req_rp_ref }
+                    rps = cluster.resource_pools.select do |r|
+                        r._ref == req_rp_ref
+                    end
 
                     if rps.empty?
                         raise "No matching resource pool found (#{req_rp})."
-                    else
-                        return rps.first
                     end
+
+                    rps.first
                 else
-                    return cluster['resourcePool']
+                    cluster['resourcePool']
                 end
             end
         end
@@ -329,63 +380,71 @@ module VCenterDriver
         # @return RbVmomi::VIM::Datastore or nil
         def get_ds(current_ds_ref = nil)
             if !current_ds_ref
-                current_ds_id  = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
-                current_ds     = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, current_ds_id)
+                current_ds_id =
+                    one_item[
+                        'HISTORY_RECORDS/HISTORY[last()]/DS_ID'
+                    ]
+                current_ds = VCenterDriver::VIHelper.one_item(
+                    OpenNebula::Datastore, current_ds_id
+                )
                 current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']
             end
 
             if current_ds_ref
-                dc = cluster.get_dc
+                dc = cluster.datacenter
 
                 ds_folder = dc.datastore_folder
                 ds = ds_folder.get(current_ds_ref)
-                ds_item = ds.item rescue nil
+                ds.item rescue nil
 
-                return ds_item
             else
-                return nil
+                nil
             end
         end
 
         # StorageResouceManager reference
-        def get_sm
+        def storagemanager
             self['_connection.serviceContent.storageResourceManager']
         end
 
         # @return Customization or nil
-        def get_customization
-            xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"
+        def customization_spec
+            xpath = 'USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC'
             customization_spec = one_item[xpath]
 
             if customization_spec.nil?
-                return nil
+                return
             end
 
             begin
-                custom_spec = vi_client.vim
-                                .serviceContent
-                                .customizationSpecManager
-                                .GetCustomizationSpec(:name => customization_spec)
+                custom_spec = vi_client
+                              .vim
+                              .serviceContent
+                              .customizationSpecManager
+                              .GetCustomizationSpec(
+                                  :name => customization_spec
+                              )
 
-                if custom_spec && (spec = custom_spec.spec)
-                    return spec
-                else
-                    raise "Error getting customization spec"
+                unless custom_spec && (spec = custom_spec.spec)
+                    raise 'Error getting customization spec'
                 end
-            rescue
+
+                spec
+            rescue StandardError
                 raise "Customization spec '#{customization_spec}' not found"
             end
         end
 
-        # @return VCenterDriver::Datastore datastore where the disk will live under
+        # @return VCenterDriver::Datastore datastore
+        # where the disk will live under
         def get_effective_ds(disk)
-            if disk["PERSISTENT"] == "YES"
-                ds_ref = disk["VCENTER_DS_REF"]
+            if disk['PERSISTENT'] == 'YES'
+                ds_ref = disk['VCENTER_DS_REF']
             else
                 ds_ref = target_ds_ref
 
                 if ds_ref.nil?
-                    raise "target_ds_ref must be defined on this object."
+                    raise 'target_ds_ref must be defined on this object.'
                 end
             end
 
@@ -393,20 +452,20 @@ module VCenterDriver
         end
 
         # @return String vcenter name
-        def get_vcenter_name
+        def vc_name
             vm_prefix = host['TEMPLATE/VM_PREFIX']
             vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
 
             if !one_item['USER_TEMPLATE/VM_PREFIX'].nil?
                 vm_prefix = one_item['USER_TEMPLATE/VM_PREFIX']
             end
-            vm_prefix.gsub!("$i", one_item['ID'])
+            vm_prefix.gsub!('$i', one_item['ID'])
 
-            vm_suffix = ""
+            vm_suffix = ''
             if !one_item['USER_TEMPLATE/VM_SUFFIX'].nil?
                 vm_suffix = one_item['USER_TEMPLATE/VM_SUFFIX']
             end
-            vm_suffix.gsub!("$i", one_item['ID'])
+            vm_suffix.gsub!('$i', one_item['ID'])
 
             vm_prefix + one_item['NAME'] + vm_suffix
         end
@@ -414,12 +473,12 @@ module VCenterDriver
         # @return vCenter Tags
         def vcenter_tags
             one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
-            one_item.retrieve_xmlelements("USER_TEMPLATE/VCENTER_TAG")
+            one_item.retrieve_xmlelements('USER_TEMPLATE/VCENTER_TAG')
         end
 
         # @return if has vCenter Tags
         def vcenter_tags?
-            vcenter_tags.size > 0
+            !vcenter_tags.empty?
         end
 
         # @return if has cpuHotAddEnabled
@@ -434,7 +493,7 @@ module VCenterDriver
 
             one_item[
                 'USER_TEMPLATE/HOT_RESIZE/CPU_HOT_ADD_ENABLED'
-            ] == "YES"
+            ] == 'YES'
         end
 
         # @return if has memoryHotAddEnabled
@@ -449,19 +508,47 @@ module VCenterDriver
 
             one_item[
                 'USER_TEMPLATE/HOT_RESIZE/MEMORY_HOT_ADD_ENABLED'
-            ] == "YES"
+            ] == 'YES'
         end
 
-        ############################################################################
+        ########################################################################
         # Create and reconfigure VM related methods
-        ############################################################################
+        ########################################################################
 
-        # This function creates a new VM from the driver_action XML and returns the
+        # This function creates a new VM from the
+        # driver_action XML and returns the
         # VMware ref
         # @param drv_action XML representing the deploy action
         # @return String vmware ref
         def clone_vm(drv_action)
-            vcenter_name = get_vcenter_name
+            vcenter_name = vc_name
+
+            dc = cluster.datacenter
+
+            vcenter_vm_folder = drv_action['USER_TEMPLATE/VCENTER_VM_FOLDER']
+
+            if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
+                vcenter_vm_folder =
+                    vcenter_folder_name(vcenter_vm_folder, drv_action)
+
+                vcenter_vm_folder_object =
+                    dc.item.find_folder(vcenter_vm_folder)
+
+                if vcenter_vm_folder_object.nil?
+                    begin
+                        dc.item.vmFolder.CreateFolder(
+                            :name => vcenter_vm_folder
+                        )
+                    rescue StandardError => e
+                        error_message = e.message
+                        if VCenterDriver::CONFIG[:debug_information]
+                            error_message += ' ' + e.backtrace.join("\n")
+                        end
+                        raise 'Cannot create Folder in vCenter: '\
+                              "#{error_message}"
+                    end
+                end
+            end
 
             dc = cluster.get_dc
 
@@ -490,11 +577,15 @@ module VCenterDriver
             end
 
             vc_template_ref = drv_action['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
-            vc_template = RbVmomi::VIM::VirtualMachine(@vi_client.vim, vc_template_ref)
+            vc_template = RbVmomi::VIM::VirtualMachine(@vi_client.vim,
+                                                       vc_template_ref)
 
             ds = get_ds
 
-            asking_for_linked_clones = drv_action['USER_TEMPLATE/VCENTER_LINKED_CLONES']
+            asking_for_linked_clones =
+                drv_action[
+                    'USER_TEMPLATE/VCENTER_LINKED_CLONES'
+                ]
             disk_move_type = calculate_disk_move_type(ds,
                                                       vc_template,
                                                       asking_for_linked_clones)
@@ -503,15 +594,35 @@ module VCenterDriver
 
             clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)
 
-            vcenter_vm_folder_object = vcenter_folder(vcenter_vm_folder, vc_template, dc)
+            vcenter_vm_folder_object =
+                vcenter_folder(
+                    vcenter_vm_folder,
+                    vc_template,
+                    dc
+                )
 
             if ds.instance_of? RbVmomi::VIM::StoragePod
                 # VM is cloned using Storage Resource Manager for StoragePods
                 begin
-                    vm = storagepod_clonevm_task(vc_template, vcenter_name,
-                                                clone_spec, ds, vcenter_vm_folder_object, dc)
-                rescue Exception => e
-                    raise "Cannot clone VM Template to StoragePod: #{e.message}"
+                    opts = {
+                        :vc_template => vc_template,
+                        :vcenter_name => vcenter_name,
+                        :clone_spec => clone_spec,
+                        :storpod => ds,
+                        :vcenter_vm_folder_object => vcenter_vm_folder_object,
+                        :dc => dc
+                    }
+
+                    vm = storagepod_clonevm_task(opts)
+                rescue StandardError => e
+                    error =
+                        "Cannot clone VM Template to StoragePod: #{e.message}."
+
+                    if VCenterDriver::CONFIG[:debug_information]
+                        error += "\n\n#{e.backtrace}"
+                    end
+
+                    raise error
                 end
             else
                 vm = nil
@@ -519,8 +630,9 @@ module VCenterDriver
                     vm = vc_template.CloneVM_Task(
                         :folder => vcenter_vm_folder_object,
                         :name   => vcenter_name,
-                        :spec   => clone_spec).wait_for_completion
-                rescue Exception => e
+                        :spec   => clone_spec
+                    ).wait_for_completion
+                rescue StandardError => e
                     if !e.message.start_with?('DuplicateName')
                         raise "Cannot clone VM Template: #{e.message}"
                     end
@@ -528,21 +640,21 @@ module VCenterDriver
                     vm_folder = dc.vm_folder
                     vm_folder.fetch!
                     vm = vm_folder.items
-                            .select{|k,v| v.item.name == vcenter_name}
-                            .values.first.item rescue nil
+                                  .select {|_k, v| v.item.name == vcenter_name }
+                                  .values.first.item rescue nil
 
-                    if vm
-                        # Detach all persistent disks to avoid accidental destruction
-                        detach_persistent_disks(vm)
+                    raise "Cannot clone VM Template: #{e.message}" unless vm
 
-                        vm.Destroy_Task.wait_for_completion
-                        vm = vc_template.CloneVM_Task(
-                            :folder => vcenter_vm_folder_object,
-                            :name   => vcenter_name,
-                            :spec   => clone_spec).wait_for_completion
-                    else
-                        raise "Cannot clone VM Template: #{e.message}"
-                    end
+                    # Detach all persistent disks to
+                    # avoid accidental destruction
+                    detach_persistent_disks(vm)
+
+                    vm.Destroy_Task.wait_for_completion
+                    vm = vc_template.CloneVM_Task(
+                        :folder => vcenter_vm_folder_object,
+                        :name   => vcenter_name,
+                        :spec   => clone_spec
+                    ).wait_for_completion
                 end
             end
 
@@ -551,109 +663,166 @@ module VCenterDriver
 
             reference_unmanaged_devices(vc_template_ref)
 
-            return self['_ref']
+            self['_ref']
         end
 
-        # TODO: review
-        def storagepod_clonevm_task(vc_template, vcenter_name, clone_spec, storpod, vcenter_vm_folder_object, dc)
+        # This function clone a VM Template to StoragePod
+        # @param opts HASH with all parameters need it to clone
+        # opts = {
+        #   :vc_template => vc_template,
+        #   :vcenter_name => vcenter_name,
+        #   :clone_spec => clone_spec,
+        #   :storpod => ds,
+        #   :vcenter_vm_folder_object => vcenter_vm_folder_object,
+        #   :dc => dc
+        # }
+        # @return vm (VirtualMachine)
+        def storagepod_clonevm_task(opts)
+            vc_template = opts[:vc_template]
+            vcenter_name = opts[:vcenter_name]
+            clone_spec = opts[:clone_spec]
+            storpod = opts[:storpod]
+            vcenter_vm_folder_object = opts[:vcenter_vm_folder_object]
+            dc = opts[:dc]
 
-            storage_manager = vc_template
-                                ._connection.serviceContent.storageResourceManager
+            storage_manager =
+                vc_template
+                ._connection
+                .serviceContent
+                .storageResourceManager
 
             storage_spec = RbVmomi::VIM.StoragePlacementSpec(
-                type: 'clone',
-                cloneName: vcenter_name,
-                folder: vcenter_vm_folder_object,
-                podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
-                vm: vc_template,
-                cloneSpec: clone_spec
+                :type => 'clone',
+                :cloneName => vcenter_name,
+                :folder => vcenter_vm_folder_object,
+                :podSelectionSpec =>
+                    RbVmomi::VIM
+                        .StorageDrsPodSelectionSpec(
+                            :storagePod => storpod
+                        ),
+                :vm => vc_template,
+                :cloneSpec => clone_spec
             )
 
             # Query a storage placement recommendation
             result = storage_manager
-                        .RecommendDatastores(storageSpec: storage_spec) rescue nil
+                     .RecommendDatastores(
+                         :storageSpec => storage_spec
+                     ) rescue nil
 
-            raise "Could not get placement specification for StoragePod" if result.nil?
+            if result.nil?
+                raise 'Could not get placement specification for StoragePod'
+            end
 
-            if !result.respond_to?(:recommendations) || result.recommendations.size == 0
-                raise "Could not get placement specification for StoragePod"
+            if !result
+               .respond_to?(
+                   :recommendations
+               ) || result.recommendations.empty?
+                raise 'Could not get placement specification for StoragePod'
             end
 
             # Get recommendation key to be applied
             key = result.recommendations.first.key ||= ''
-            raise "Missing Datastore recommendation for StoragePod" if key.empty?
+            if key.empty?
+                raise 'Missing Datastore recommendation for StoragePod'
+            end
 
             begin
                 apply_sr = storage_manager
-                                .ApplyStorageDrsRecommendation_Task(key: [key])
-                                .wait_for_completion
-                return apply_sr.vm
-            rescue Exception => e
+                           .ApplyStorageDrsRecommendation_Task(:key => [key])
+                           .wait_for_completion
+                apply_sr.vm
+            rescue StandardError => e
                 if !e.message.start_with?('DuplicateName')
-                    raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
+                    raise 'Cannot clone VM Template: '\
+                          "#{e.message}\n#{e.backtrace}"
                 end
 
                 # The VM already exists, try to find the vm
                 vm_folder = dc.vm_folder
                 vm_folder.fetch!
                 vm = vm_folder.items
-                        .select{|k,v| v.item.name == vcenter_name}
-                        .values.first.item rescue nil
+                              .select {|_k, v| v.item.name == vcenter_name }
+                              .values.first.item rescue nil
 
                 if vm
 
                     begin
-                        # Detach all persistent disks to avoid accidental destruction
+                        # Detach all persistent disks to
+                        # avoid accidental destruction
                         detach_persistent_disks(vm)
 
                         # Destroy the VM with any disks still attached to it
                         vm.Destroy_Task.wait_for_completion
 
                         # Query a storage placement recommendation
-                        result = storage_manager.RecommendDatastores(storageSpec: storage_spec) rescue nil
+                        result =
+                            storage_manager
+                            .RecommendDatastores(
+                                :storageSpec => storage_spec
+                            ) rescue nil
 
-                        raise "Could not get placement specification for StoragePod" if result.nil?
+                        if result.nil?
+                            raise 'Could not get placement specification '\
+                                  'for StoragePod'
+                        end
 
-                        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
-                            raise "Could not get placement specification for StoragePod"
+                        if !result
+                           .respond_to?(
+                               :recommendations
+                           ) ||
+                            result
+                           .recommendations.empty?
+                            raise 'Could not get placement '\
+                                  'specification for StoragePod'
                         end
 
                         # Get recommendation key to be applied
                         key = result.recommendations.first.key ||= ''
-                        raise "Missing Datastore recommendation for StoragePod" if key.empty?
+                        if key.empty?
+                            raise 'Missing Datastore recommendation '\
+                                  'for StoragePod'
+                        end
 
-                        apply_sr = storage_manager
-                                .ApplyStorageDrsRecommendation_Task(key: [key])
-                                .wait_for_completion
-                        return apply_sr.vm
-                    rescue Exception => e
-                      raise "Failure applying recommendation while cloning VM: #{e.message}"
+                        apply_sr =
+                            storage_manager
+                            .ApplyStorageDrsRecommendation_Task(
+                                :key => [key]
+                            )
+                            .wait_for_completion
+                        apply_sr.vm
+                    rescue StandardError => e
+                        raise 'Failure applying recommendation while '\
+                              "cloning VM: #{e.message}"
                     end
                 end
             end
         end
 
-
         # Calculates how to move disk backinggs from the
         # vCenter VM Template moref
         def calculate_disk_move_type(ds, vc_template, use_linked_clones)
             # Default disk move type (Full Clone)
             disk_move_type = :moveAllDiskBackingsAndDisallowSharing
 
-            if ds.instance_of? RbVmomi::VIM::Datastore
-                if use_linked_clones && use_linked_clones.downcase == 'yes'
-                    # Check if all disks in template has delta disks
-                    disks = vc_template.config
-                                       .hardware
-                                       .device
-                                       .grep(RbVmomi::VIM::VirtualDisk)
+            if ds.instance_of?(
+                RbVmomi::VIM::Datastore
+            ) &&
+               use_linked_clones &&
+               use_linked_clones.downcase == 'yes'
+                # Check if all disks in template has delta disks
+                disks = vc_template.config
+                                   .hardware
+                                   .device
+                                   .grep(RbVmomi::VIM::VirtualDisk)
 
-                    disks_no_delta = disks.select { |d| d.backing.parent == nil }
+                disks_no_delta = disks.select do |d|
+                    d.backing.parent.nil?
+                end
 
-                    # Can use linked clones if all disks have delta disks
-                    if (disks_no_delta.size == 0)
-                        disk_move_type = :moveChildMostDiskBacking
-                    end
+                # Can use linked clones if all disks have delta disks
+                if disks_no_delta.empty?
+                    disk_move_type = :moveChildMostDiskBacking
                 end
             end
 
@@ -677,32 +846,40 @@ module VCenterDriver
         def vcenter_folder(vcenter_vm_folder, vc_template, dc)
             vcenter_vm_folder_object = nil
 
-            if !!vcenter_vm_folder && !vcenter_vm_folder.empty?
-                vcenter_vm_folder_object = dc.item.find_folder(vcenter_vm_folder)
+            if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
+                vcenter_vm_folder_object =
+                    dc
+                    .item
+                    .find_folder(
+                        vcenter_vm_folder
+                    )
             end
 
-            vcenter_vm_folder_object = vc_template.parent if vcenter_vm_folder_object.nil?
+            if vcenter_vm_folder_object.nil?
+                vcenter_vm_folder_object = vc_template.parent
+            end
             vcenter_vm_folder_object
         end
 
-
         # @return clone parameters spec hash
         def spec_hash_clone(disk_move_type)
             # Relocate spec
             relocate_spec_params = {}
 
-            relocate_spec_params[:pool] = get_rp
+            relocate_spec_params[:pool] = resource_pool
             relocate_spec_params[:diskMoveType] = disk_move_type
 
             ds = get_ds
 
-            relocate_spec_params[:datastore] = ds if ds.instance_of? RbVmomi::VIM::Datastore
+            relocate_spec_params[:datastore] =
+                ds if ds.instance_of? RbVmomi::VIM::Datastore
 
             relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
-                                                            relocate_spec_params)
+                relocate_spec_params
+            )
 
             # Running flag - prevents spurious poweroff states in the VM
-            running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]
+            running_flag = [{ :key => 'opennebula.vm.running', :value => 'no' }]
 
             running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
                 { :extraConfig => running_flag }
@@ -715,46 +892,46 @@ module VCenterDriver
                 :config   => running_flag_spec
             }
 
-            cs = get_customization
+            cs = customization_spec
             clone_parameters[:customization] = cs if cs
 
             clone_parameters
         end
 
-        ############################################################################
+        ########################################################################
         # VirtualMachine Resource model methods
-        ############################################################################
+        ########################################################################
 
         #
-        #gets the representation of the nics
+        # gets the representation of the nics
         #
-        #@return [Hash(String => self.Nic)
+        # @return [Hash(String => self.Nic)
         def nics
             if !@nics[:macs].empty?
-                return @nics.reject{|k| k == :macs}
+                return @nics.reject {|k| k == :macs }
             end
 
             info_nics
         end
 
-        #gets the representation of the disks
+        # gets the representation of the disks
         #
-        #@return [Hash(String => self.Disk)
+        # @return [Hash(String => self.Disk)
         def disks
             return @disks unless @disks.empty?
 
             info_disks
         end
 
-        #iterate over the nics model
+        # iterate over the nics model
         #
-        #@param condition[Symbol] selects nics that matches certain condition
-        #see Self.Nic|Resource class to see some methods: :exits?, :one?...
+        # @param condition[Symbol] selects nics that matches certain condition
+        # see Self.Nic|Resource class to see some methods: :exits?, :one?...
         #
-        #@return yield the nic
+        # @return yield the nic
         def nics_each(condition)
             res = []
-            nics.each do |id, nic|
+            nics.each do |_id, nic|
                 next unless nic.method(condition).call
 
                 yield nic if block_given?
@@ -765,15 +942,15 @@ module VCenterDriver
             res
         end
 
-        #iterate over the disks model
+        # iterate over the disks model
         #
-        #@param condition[Symbol] selects disks that matches certain condition
-        #see Self.Disk|Resource class to see some methods: :exits?, :one?...
+        # @param condition[Symbol] selects disks that matches certain condition
+        # see Self.Disk|Resource class to see some methods: :exits?, :one?...
         #
-        #@return yield the disk
+        # @return yield the disk
         def disks_each(condition)
             res = []
-            disks.each do |id, disk|
+            disks.each do |_id, disk|
                 next unless disk.method(condition).call
 
                 yield disk if block_given?
@@ -785,12 +962,12 @@ module VCenterDriver
         end
 
         def disks_synced?
-            disks_each(:unsynced?){ return false }
+            disks_each(:unsynced?) { return false }
 
             true
         end
 
-        def get_template_ref
+        def template_ref_get
             one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
         end
 
@@ -799,15 +976,15 @@ module VCenterDriver
         end
 
         # Queries to OpenNebula the machine disks xml representation
-        def get_one_disks
+        def one_disks_list
             one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
-            one_item.retrieve_xmlelements("TEMPLATE/DISK")
+            one_item.retrieve_xmlelements('TEMPLATE/DISK')
         end
 
         # Queries to OpenNebula the machine nics xml representation
         def one_nics_get
             one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
-            one_item.retrieve_xmlelements("TEMPLATE/NIC")
+            one_item.retrieve_xmlelements('TEMPLATE/NIC')
         end
 
         def linked_clones
@@ -816,10 +993,13 @@ module VCenterDriver
 
         # perform a query to vCenter asking for the OpenNebula disk
         #
-        # @param one_disk [XMLelement]  The OpenNebula object representation of the disk
-        # @param keys [Hash (String => String)] Hashmap with the unmanaged keys
-        # @param vc_disks [Array (vcenter_disks)] Array of the machine real disks
-        # see vcenter_disks_get method
+        # @param one_disk [XMLelement]  The OpenNebula object
+        #                 representation of the disk
+        # @param keys [Hash (String => String)] Hashmap with
+        #             the unmanaged keys
+        # @param vc_disks [Array (vcenter_disks)] Array of
+        #                 the machine real disks
+        # See vcenter_disks_get method
         #
         # @return [vCenter_disk] the proper disk
         def query_disk(one_disk, keys, vc_disks)
@@ -834,7 +1014,7 @@ module VCenterDriver
             end
 
             if key
-                query = vc_disks.select {|dev| key == dev[:key]}
+                query = vc_disks.select {|dev| key == dev[:key] }
             else
                 if snapshots?
                     error = 'Disk metadata not present and snapshots exist. ' \
@@ -842,16 +1022,16 @@ module VCenterDriver
                     raise error
                 end
 
-                # Try to find the disk using the path known by OpenNebula
+                # Try to find the disk using the path known by OpenNebula
                 source_path = one_disk['SOURCE']
                 calculated_path = disk_real_path(one_disk, index)
-                query = vc_disks.select { |dev|
+                query = vc_disks.select do |dev|
                     source_path == dev[:path_wo_ds] ||
                     calculated_path == dev[:path_wo_ds]
-                }
+                end
             end
 
-            return nil if query.size != 1
+            return if query.size != 1
 
             query.first
         end
@@ -863,7 +1043,7 @@ module VCenterDriver
         #
         # @return [vCenter_nic] the proper nic
         def query_nic(mac, vc_nics)
-            nic = vc_nics.select{|dev| dev.macAddress == mac }.first
+            nic = vc_nics.select {|dev| dev.macAddress == mac }.first
 
             vc_nics.delete(nic) if nic
         end
@@ -873,14 +1053,14 @@ module VCenterDriver
         #
         # @return [Hash ("String" => self.Nic)] Model representation of nics
         def info_nics
-            @nics = {macs: {}}
+            @nics = { :macs => {} }
 
-            vc_nics  = vcenter_nics_get
+            vc_nics  = vcenter_nics_list
             one_nics = one_nics_get
 
             one_nics.each do |one_nic|
-                index  = one_nic["NIC_ID"]
-                mac    = one_nic["MAC"]
+                index  = one_nic['NIC_ID']
+                mac    = one_nic['MAC']
                 vc_dev = query_nic(mac, vc_nics)
 
                 if vc_dev
@@ -894,17 +1074,20 @@ module VCenterDriver
             vc_nics.each do |d|
                 backing = d.backing
 
-                if backing.class == NET_CARD
+                case backing.class
+                when NET_CARD
                     key = backing.network._ref
-                elsif backing.class == DNET_CARD
+                when DNET_CARD
                     key = backing.port.portgroupKey
-                elsif backing.class == OPAQUE_CARD
+                when OPAQUE_CARD
                     # Select only Opaque Networks
-                    opaqueNetworks = @item.network.select{|net|
-                        RbVmomi::VIM::OpaqueNetwork == net.class}
-                    opaqueNetwork = opaqueNetworks.find{|opn|
-                        backing.opaqueNetworkId == opn.summary.opaqueNetworkId}
-                    key = opaqueNetwork._ref
+                    opaque_networks = @item.network.select do |net|
+                        RbVmomi::VIM::OpaqueNetwork == net.class
+                    end
+                    opaque_network = opaque_networks.find do |opn|
+                        backing.opaqueNetworkId == opn.summary.opaqueNetworkId
+                    end
+                    key = opaque_network._ref
                 else
                     raise "Unsupported network card type: #{backing.class}"
                 end
@@ -912,7 +1095,7 @@ module VCenterDriver
                 @nics["#{key}#{d.key}"] = Nic.vc_nic(d)
             end
 
-            @nics.reject{|k| k == :macs}
+            @nics.reject {|k| k == :macs }
         end
 
         # Refresh VcenterDriver machine disks model, does not perform any
@@ -922,12 +1105,12 @@ module VCenterDriver
         def info_disks
             @disks = {}
 
-            keys = disk_keys_get
+            keys = disk_keys
             vc_disks  = vcenter_disks_get
-            one_disks = get_one_disks
+            one_disks = one_disks_list
 
             one_disks.each do |one_disk|
-                index = one_disk["DISK_ID"]
+                index = one_disk['DISK_ID']
 
                 disk = query_disk(one_disk, keys, vc_disks)
 
@@ -940,7 +1123,7 @@ module VCenterDriver
                 end
             end
 
-            vc_disks.each {|d| @disks[d[:path_wo_ds]] = Disk.vc_disk(d)}
+            vc_disks.each {|d| @disks[d[:path_wo_ds]] = Disk.vc_disk(d) }
 
             @disks
         end
@@ -962,16 +1145,24 @@ module VCenterDriver
             return @nics[index] if @nics[index] && opts[:sync].nil?
 
             if is_mac
-                one_nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[MAC='#{mac}']").first rescue nil
+                one_nic =
+                    one_item
+                    .retrieve_xmlelements(
+                        "TEMPLATE/NIC[MAC='#{mac}']"
+                    ).first rescue nil
                 index = one_nic['NIC_ID'] if one_nic
             else
-                one_nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[NIC_ID='#{index}']").first rescue nil
+                one_nic =
+                    one_item
+                    .retrieve_xmlelements(
+                        "TEMPLATE/NIC[NIC_ID='#{index}']"
+                    ).first rescue nil
                 mac     = one_nic['MAC'] if one_nic
             end
 
             raise "nic #{index} not found" unless one_nic
 
-            vc_nics = vcenter_nics_get
+            vc_nics = vcenter_nics_list
             vc_nic  = query_nic(mac, vc_nics)
 
             if vc_nic
@@ -991,12 +1182,20 @@ module VCenterDriver
 
             return @disks[index] if @disks[index] && opts[:sync].nil?
 
-            one_disk = one_item.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID='#{index}']").first rescue nil
+            one_disk =
+                one_item
+                .retrieve_xmlelements(
+                    "TEMPLATE/DISK[DISK_ID='#{index}']"
+                ).first rescue nil
 
             raise "disk #{index} not found" unless one_disk
 
-            keys = opts[:keys].nil? ? disk_keys_get : opts[:keys]
-            vc_disks = opts[:disks].nil? ? vcenter_disks_get : opts[:disks]
+            keys = opts[:keys].nil? ? disk_keys : opts[:keys]
+            if opts[:disks].nil?
+                vc_disks = vcenter_disks_get
+            else
+                vc_disks = opts[:disks]
+            end
             vc_disk = query_disk(one_disk, keys, vc_disks)
 
             if vc_disk
@@ -1006,51 +1205,73 @@ module VCenterDriver
             end
         end
 
-        # Matches disks from the vCenter VM Template (or VM if it is coming
+        # Matches disks from the vCenter VM Template (or VM if it is coming
         # from a Wild VM) with the disks represented in OpenNebula VM
         # data model (ie, the XML)
         def reference_unmanaged_devices(template_ref, execute = true)
-            extraconfig   = []
             device_change = []
             spec          = {}
 
             # Get unmanaged disks in OpenNebula's VM template
-            xpath = "TEMPLATE/DISK[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
+            xpath =
+                'TEMPLATE/DISK[OPENNEBULA_MANAGED="NO" '\
+                'or OPENNEBULA_MANAGED="no"]'
             unmanaged_disks = one_item.retrieve_xmlelements(xpath)
 
             managed = false
-            extraconfig = reference_disks(template_ref, unmanaged_disks, managed)
+            extraconfig = reference_disks(template_ref, unmanaged_disks,
+                                          managed)
 
             # Add info for existing nics in template in vm xml
-            xpath = "TEMPLATE/NIC[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
+            xpath =
+                'TEMPLATE/NIC[OPENNEBULA_MANAGED="NO" '\
+                'or OPENNEBULA_MANAGED="no"]'
             unmanaged_nics = one_item.retrieve_xmlelements(xpath)
 
-            # Handle NIC changes (different model and/or set MAC address
+            # Handle NIC changes (different model and/or set MAC address
             # for unmanaged nics
             begin
                 if !unmanaged_nics.empty?
-                    nics = vcenter_nics_get
+                    nics = vcenter_nics_list
 
-                    select_net =->(ref){
+                    # iterate over nics array and find nic with ref
+                    # or return nil if not exist
+                    select_net =lambda {|ref|
                         device = nil
                         nics.each do |nic|
                             type = nic.backing.class
-                            if type == NET_CARD
+
+                            case type
+                            when NET_CARD
                                 nref = nic.backing.network._ref
-                            elsif type == DNET_CARD
+                            when DNET_CARD
                                 nref = nic.backing.port.portgroupKey
-                            elsif type == OPAQUE_CARD
+                            when OPAQUE_CARD
                                 # Select only Opaque Networks
-                                opaqueNetworks = @item.network.select{|net|
-                                    RbVmomi::VIM::OpaqueNetwork == net.class}
-                                opaqueNetwork = opaqueNetworks.find{|opn|
-                                    nic.backing.opaqueNetworkId == opn.summary.opaqueNetworkId}
-                                nref = opaqueNetwork._ref
+                                opaque_networks = @item.network.select do |net|
+                                    RbVmomi::VIM::OpaqueNetwork == net.class
+                                end
+                                opaque_network =
+                                    opaque_networks
+                                    .find do |opn|
+                                        first_condition =
+                                            nic
+                                            .backing
+                                            .opaqueNetworkId
+                                        second_condition =
+                                            opn
+                                            .summary
+                                            .opaqueNetworkId
+                                        first_condition == second_condition
+                                    end
+                                nref = opaque_network._ref
                             else
-                                raise "Unsupported network card type: #{nic.backing.class}"
+                                raise 'Unsupported network card type: '\
+                                        "#{nic.backing.class}"
                             end
 
                             next unless nref == ref
+
                             device = nic
                             break
                         end
@@ -1062,32 +1283,53 @@ module VCenterDriver
                         end
                     }
 
+                    # Go over all unmanaged nics in order to sync
+                    # with vCenter Virtual Machine
                     unmanaged_nics.each do |unic|
                         vnic      = select_net.call(unic['VCENTER_NET_REF'])
                         nic_class = vnic.class if vnic
-                        new_model = Nic.nic_model_class(unic['MODEL']) if unic['MODEL']
+                        new_model =
+                            Nic
+                            .nic_model_class(
+                                unic['MODEL']
+                            ) if unic['MODEL']
 
+                        # if vnic is nil add a new device
                         if vnic.nil?
-                                device_change << calculate_add_nic_spec(unic)
+                            device_change << calculate_add_nic_spec(unic)
                         # delete actual nic and update the new one.
                         elsif new_model && new_model != nic_class
-                                device_change << { :device => vnic, :operation => :remove }
-                                device_change << calculate_add_nic_spec(unic, vnic.unitNumber)
+                            device_change << {
+                                :device => vnic,
+                                :operation => :remove
+                            }
+                            device_change <<
+                                calculate_add_nic_spec(
+                                    unic,
+                                    vnic.unitNumber
+                                )
+                        # update the actual nic
                         else
-                                vnic.macAddress   = unic['MAC']
-                                device_change << { :device => vnic, :operation => :edit }
+                            vnic.macAddress = unic['MAC']
+                            device_change <<
+                                {
+                                    :device => vnic,
+                                    :operation => :edit
+                                }
                         end
                     end
 
                 end
             rescue StandardError => e
-                raise "There is a problem with your vm NICS, make sure that they are working properly. Error: #{e.message}"
+                raise 'There is a problem with your vm NICS, '\
+                      'make sure that they are working properly. '\
+                      "Error: #{e.message}"
             end
 
             # Save in extraconfig the key for unmanaged disks
             if !extraconfig.empty? || !device_change.empty?
-                spec[:extraConfig]  = extraconfig if !extraconfig.empty?
-                spec[:deviceChange] = device_change if !device_change.empty?
+                spec[:extraConfig]  = extraconfig unless extraconfig.empty?
+                spec[:deviceChange] = device_change unless device_change.empty?
 
                 return spec unless execute
 
@@ -1101,40 +1343,55 @@ module VCenterDriver
             # OpenNebula VM disks saved inside .vmx file in vCenter
             disks_extraconfig_current = {}
             # iterate over all attributes and get the disk information
-            # keys for disks are prefixed with opennebula.disk and opennebula.mdisk
+            # keys for disks are prefixed with
+            # opennebula.disk and opennebula.mdisk
             @item.config.extraConfig.each do |elem|
-                disks_extraconfig_current[elem.key] = elem.value if elem.key.start_with?("opennebula.disk.")
-                disks_extraconfig_current[elem.key] = elem.value if elem.key.start_with?("opennebula.mdisk.")
+                disks_extraconfig_current[elem.key] =
+                    elem.value if elem.key.start_with?('opennebula.disk.')
+                disks_extraconfig_current[elem.key] =
+                    elem.value if elem.key.start_with?('opennebula.mdisk.')
             end
 
             # disks that exist currently in the vCenter Virtual Machine
             disks_vcenter_current = []
             disks_each(:synced?) do |disk|
                 begin
-                    key_prefix = disk.managed? ? "opennebula.mdisk." : "opennebula.disk."
+                    if disk.managed?
+                        key_prefix = 'opennebula.mdisk.'
+                    else
+                        key_prefix = 'opennebula.disk.'
+                    end
                     k = "#{key_prefix}#{disk.id}"
-                    v = "#{disk.key}"
+                    v = disk.key.to_s
 
-                    disks_vcenter_current << {key: k, value: v}
-                rescue StandardError => e
+                    disks_vcenter_current << { :key => k, :value => v }
+                rescue StandardError => _e
                     next
                 end
             end
 
             update = false
-            # differences in the number of disks between vCenter and OpenNebula VMs
-            num_disks_difference = disks_extraconfig_current.keys.count - disks_vcenter_current.count
+            # differences in the number of disks
+            # between vCenter and OpenNebula VMs
+            num_disks_difference =
+                disks_extraconfig_current.keys.count -
+                    disks_vcenter_current.count
 
             # check if disks are same in vCenter and OpenNebula
             disks_vcenter_current.each do |item|
                 # check if vCenter disk have representation in the extraConfig
-                # but with a different key, then we have to update
-                if (disks_extraconfig_current.has_key? item[:key]) and !(disks_extraconfig_current[item[:key]] == item[:value])
+                # but with a different key, then we have to update
+                first_condition =
+                    disks_extraconfig_current.key? item[:key]
+                second_condition =
+                    disks_extraconfig_current[item[:key]] == item[:value]
+                if first_condition && !second_condition
                     update = true
                 end
-                # check if vCenter disk hasn't got a representation in the extraConfig
-                # then we have to update
-                if !disks_extraconfig_current.has_key? item[:key]
+                # check if vCenter disk hasn't got
+                # a representation in the extraConfig
+                # then we have to update
+                if !disks_extraconfig_current.key? item[:key]
                     update = true
                 end
             end
@@ -1142,32 +1399,39 @@ module VCenterDriver
             # new configuration for vCenter .vmx file
             disks_extraconfig_new = {}
 
-            if num_disks_difference != 0 || update
-                # Step 1: remove disks in the current configuration of .vmx
-                # Avoids having an old disk in the configuration that does not really exist
-                disks_extraconfig_current.keys.each do |key|
-                    disks_extraconfig_new[key] = ""
-                end
+            return unless num_disks_difference != 0 || update
 
-                # Step 2: add current vCenter disks to new configuration
-                disks_vcenter_current.each do |item|
-                    disks_extraconfig_new[item[:key]] = item[:value]
-                end
-
-                # Step 3: create extraconfig_new with the values to update
-                extraconfig_new = []
-                disks_extraconfig_new.keys.each do |key|
-                    extraconfig_new << {key: key, value: disks_extraconfig_new[key]}
-                end
-
-                # Step 4: update the extraConfig
-                spec_hash = {:extraConfig => extraconfig_new}
-                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
-                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+            # Step 1: remove disks in the current configuration of .vmx
+            # Avoids having an old disk in the configuration
+            # that does not really exist
+            disks_extraconfig_current.keys.each do |key|
+                disks_extraconfig_new[key] = ''
             end
+
+            # Step 2: add current vCenter disks to new configuration
+            disks_vcenter_current.each do |item|
+                disks_extraconfig_new[item[:key]] = item[:value]
+            end
+
+            # Step 3: create extraconfig_new with the values to update
+            extraconfig_new = []
+            disks_extraconfig_new.keys.each do |key|
+                extraconfig_new <<
+                    {
+                        :key =>
+                            key,
+                        :value =>
+                            disks_extraconfig_new[key]
+                    }
+            end
+
+            # Step 4: update the extraConfig
+            spec_hash = { :extraConfig => extraconfig_new }
+            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
+            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
         end
 
-        # Build extraconfig section to reference disks
+        # Build extraconfig section to reference disks
         # by key and avoid problems with changing paths
         # (mainly due to snapshots)
         # Uses VM Templte if ref available, or the vCenter VM if not
@@ -1176,19 +1440,24 @@ module VCenterDriver
             return [] if disks.empty? || instantiated_as_persistent?
 
             extraconfig = []
-            key_prefix = managed ? "opennebula.mdisk" : "opennebula.disk"
+            if managed
+                key_prefix = 'opennebula.mdisk'
+            else
+                key_prefix = 'opennebula.disk'
+            end
 
             # Get vcenter VM disks to know real path of cloned disk
             vcenter_disks = vcenter_disks_get
 
             # Create an array with the paths of the disks in vcenter template
             if !template_ref.nil?
-              template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
-              template_disks = template.vcenter_disks_get
+                template = VCenterDriver::Template.new_from_ref(template_ref,
+                                                                vi_client)
+                template_disks = template.vcenter_disks_get
             else
-              # If we are dealing with a Wild VM, we simply use
-              # what is available in the vCenter VM
-              template_disks = vcenter_disks_get
+                # If we are dealing with a Wild VM, we simply use
+                # what is available in the vCenter VM
+                template_disks = vcenter_disks_get
             end
             template_disks_vector = []
             template_disks.each do |d|
@@ -1197,33 +1466,56 @@ module VCenterDriver
 
             # Try to find index of disks in template disks
             disks.each do |disk|
-                disk_source = VCenterDriver::FileHelper.unescape_path(disk["SOURCE"])
-                template_disk = template_disks.select{|d| d[:path_wo_ds] == disk_source }.first
+                disk_source =
+                    VCenterDriver::FileHelper
+                    .unescape_path(
+                        disk['SOURCE']
+                    )
+                template_disk = template_disks.select do |d|
+                    d[:path_wo_ds] == disk_source
+                end.first
 
                 if template_disk
-                    vcenter_disk  = vcenter_disks.select{|d| d[:key] == template_disk[:key]}.first
+                    vcenter_disk = vcenter_disks.select do |d|
+                        d[:key] == template_disk[:key]
+                    end.first
                 end
 
-                raise "disk with path #{disk_source} not found in the vCenter VM" if !vcenter_disk
+                unless vcenter_disk
+                    raise "disk with path #{disk_source} "\
+                          'not found in the vCenter VM'
+                end
 
                 reference = {}
-                reference[:key]   = "#{key_prefix}.#{disk["DISK_ID"]}"
-                reference[:value] = "#{vcenter_disk[:key]}"
+                reference[:key]   = "#{key_prefix}.#{disk['DISK_ID']}"
+                reference[:value] = (vcenter_disk[:key]).to_s
                 extraconfig << reference
             end
 
             extraconfig
         end
 
-        # TODO: review storagedrs
+        # create storagedrs disks
+        #
+        # @param device_change_spod [array] add disk spec for every device
+        #
+        # @param device_change_spod_ids [object] map from unit ctrl to
+        #        disk_id
+        #
+        # @return extra_config [Array] array with the extra config for vCenter
         def create_storagedrs_disks(device_change_spod, device_change_spod_ids)
-
-            sm = get_sm
+            sm = storagemanager
             disk_locator = []
             extra_config = []
 
             device_change_spod.each do |device_spec|
-                disk_locator << RbVmomi::VIM.PodDiskLocator(diskId: device_spec[:device].key)
+                disk_locator <<
+                    RbVmomi::VIM
+                    .PodDiskLocator(
+                        :diskId => device_spec[
+                            :device
+                        ].key
+                    )
             end
 
             spec = {}
@@ -1237,38 +1529,53 @@ module VCenterDriver
 
             # The storage pod selection requires initialize
             spod_hash = {}
-            spod_hash[:initialVmConfig] = [ vmpod_config ]
+            spod_hash[:initialVmConfig] = [vmpod_config]
             spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
             storage_spec = RbVmomi::VIM.StoragePlacementSpec(
-                type: :reconfigure,
-                podSelectionSpec: spod_select,
-                vm: self['_ref'],
-                configSpec: spec
+                :type => :reconfigure,
+                :podSelectionSpec => spod_select,
+                :vm => self['_ref'],
+                :configSpec => spec
             )
 
             # Query a storage placement recommendation
-            result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil
+            result = sm
+                     .RecommendDatastores(
+                         :storageSpec => storage_spec
+                     ) rescue nil
 
-            raise "Could not get placement specification for StoragePod" if result.nil?
+            if result.nil?
+                raise 'Could not get placement specification for StoragePod'
+            end
 
-            if !result.respond_to?(:recommendations) || result.recommendations.size == 0
-                raise "Could not get placement specification for StoragePod"
+            if !result.respond_to?(:recommendations) ||
+                result.recommendations.empty?
+                raise 'Could not get placement specification for StoragePod'
             end
 
             # Get recommendation key to be applied
             key = result.recommendations.first.key ||= ''
-            raise "Missing Datastore recommendation for StoragePod" if key.empty?
+
+            if key.empty?
+                raise 'Missing Datastore recommendation for StoragePod'
+            end
 
             # Apply recommendation
-            sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion
+            sm.ApplyStorageDrsRecommendation_Task(
+                :key => [key]
+            ).wait_for_completion
 
             # Set references in opennebula.disk elements
             device_change_spod.each do |device_spec|
                 unit_number    = device_spec[:device].unitNumber
                 controller_key = device_spec[:device].controllerKey
-                key            = get_vcenter_disk_key(unit_number, controller_key)
-                disk_id        = device_change_spod_ids["#{controller_key}-#{unit_number}"]
-                reference      = {}
+                key            = get_vcenter_disk_key(unit_number,
+                                                      controller_key)
+                disk_id =
+                    device_change_spod_ids[
+                        "#{controller_key}-#{unit_number}"
+                    ]
+                reference = {}
                 reference[:key]   = "opennebula.disk.#{disk_id}"
                 reference[:value] = key.to_s
                 extra_config << reference
@@ -1279,37 +1586,41 @@ module VCenterDriver
 
         # set the boot order of the machine
         #
-        # @param index [String | boot_info] boot information stored in
+        # @param boot_info [String] boot information stored in
         # the template of the virtual machine. example: disk0, nic0
         #
         # @return [Array (vCenterbootClass)] An array with the vCenter classes
-        def set_boot_order(boot_info)
-            convert = ->(device_str){
+        def boot_order_update(boot_info)
+            convert = lambda {|device_str|
                 spl = device_str.scan(/^(nic|disk)(\d+$)/).flatten
                 raise "#{device_str} is not supported" if spl.empty?
 
                 sync = "sync_#{spl[0]}s"
-                for i in 0..1
-                    device = send(spl[0], *[spl[1]])
+                (0..1).each do |_i|
+                    device = send(spl[0], spl[1])
                     break if device.exists?
+
                     send(sync)
                 end
 
                 device.boot_dev
             }
 
-            boot_order = boot_info.split(',').map{ |str| convert.call(str) }
+            boot_order = boot_info.split(',').map {|str| convert.call(str) }
 
-            RbVmomi::VIM.VirtualMachineBootOptions({bootOrder: boot_order})
+            RbVmomi::VIM.VirtualMachineBootOptions({ :bootOrder => boot_order })
         end
 
         # sync OpenNebula nic model with vCenter
         #
-        # @param option  [symbol]  if :all is provided the method will try to sync
-        # all the nics (detached and not existing ones) otherwise it will only sync
+        # @param option  [symbol]  if :all is provided
+        # the method will try to sync
+        # all the nics (detached and not existing ones)
+        # otherwise it will only sync
         # the nics that are not existing
         #
-        # @param execute [boolean] indicates if the reconfigure operation is going to
+        # @param execute [boolean] indicates
+        # if the reconfigure operation is going to
         # be executed
         def sync_nics(option = :none, execute = true)
             device_change = []
@@ -1317,7 +1628,8 @@ module VCenterDriver
             if option == :all
                 dchange = []
 
-                # detached? condition indicates that the nic exists in OpeNebula but not
+                # detached? condition indicates that
+                # the nic exists in OpenNebula but not
                 # in vCenter
                 nics_each(:detached?) do |nic|
                     dchange << {
@@ -1332,7 +1644,8 @@ module VCenterDriver
                 end
             end
 
-            # no_exits? condition indicates that the nic does not exist in vCenter
+            # no_exits? condition indicates that
+            # the nic does not exist in vCenter
             nics_each(:no_exists?) do |nic|
                 device_change << calculate_add_nic_spec(nic.one_item)
             end
@@ -1347,14 +1660,14 @@ module VCenterDriver
             info_nics
         end
 
-        #Get required parameters to use VMware HTML Console SDK
-        #To be used with the following SDK:
-        #https://code.vmware.com/web/sdk/2.1.0/html-console
+        # Get required parameters to use VMware HTML Console SDK
+        # To be used with the following SDK:
+        # https://code.vmware.com/web/sdk/2.1.0/html-console
         #
-        def get_html_console_parameters
-          ticket = @item.AcquireTicket( :ticketType => "webmks" )
-          ticket_parameters = {:ticket => ticket.ticket, :host => ticket.host, :port => ticket.port}
-          ticket_parameters
+        def html_console_parameters
+            ticket = @item.AcquireTicket(:ticketType => 'webmks')
+            { :ticket => ticket.ticket, :host => ticket.host,
+              :port => ticket.port }
         end
 
         # Synchronize the OpenNebula VM representation with vCenter VM
@@ -1371,7 +1684,7 @@ module VCenterDriver
             resize_unmanaged_disks
 
             if deploy[:boot] && !deploy[:boot].empty?
-                boot_opts = set_boot_order(deploy[:boot])
+                boot_opts = boot_order_update(deploy[:boot])
             end
 
             # changes from sync_disks
@@ -1382,7 +1695,7 @@ module VCenterDriver
             extraconfig += extraconfig_context
 
             # get file_ds
-            if (files = one_item["TEMPLATE/CONTEXT/FILES_DS"])
+            if (files = one_item['TEMPLATE/CONTEXT/FILES_DS'])
                 file_id = 0
                 files.split(' ').each do |file|
                     extraconfig += extraconfig_file(file, file_id)
@@ -1397,14 +1710,14 @@ module VCenterDriver
             device_change += sync_nics(:all, false)
 
             # Set CPU, memory and extraconfig
-            num_cpus = one_item["TEMPLATE/VCPU"] || 1
+            num_cpus = one_item['TEMPLATE/VCPU'] || 1
             spec_hash = {
                 :numCPUs      => num_cpus.to_i,
-                :memoryMB     => one_item["TEMPLATE/MEMORY"],
+                :memoryMB     => one_item['TEMPLATE/MEMORY'],
                 :extraConfig  => extraconfig,
                 :deviceChange => device_change
             }
-            num_cores = one_item["TEMPLATE/TOPOLOGY/CORES"] || num_cpus.to_i
+            num_cores = one_item['TEMPLATE/TOPOLOGY/CORES'] || num_cpus.to_i
             if num_cpus.to_i % num_cores.to_i != 0
                 num_cores = num_cpus.to_i
             end
@@ -1440,89 +1753,155 @@ module VCenterDriver
 
             # token
             token = File.read(File.join(VAR_LOCATION,
-                            'vms',
-                            one_item['ID'],
-                            'token.txt')).chomp rescue nil
+                                        'vms',
+                                        one_item['ID'],
+                                        'token.txt')).chomp rescue nil
 
             context_text += "ONEGATE_TOKEN='#{token}'\n" if token
 
             # context_text
             [
-                { :key => "guestinfo.opennebula.context",
+                { :key => 'guestinfo.opennebula.context',
                   :value => Base64.encode64(context_text) }
             ]
         end
 
         def extraconfig_vnc
-            if one_item["TEMPLATE/GRAPHICS"]
-                vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"] || ''
-                vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
-                vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]
+            if one_item['TEMPLATE/GRAPHICS']
+                vnc_port   = one_item['TEMPLATE/GRAPHICS/PORT'] || ''
+                vnc_listen = one_item['TEMPLATE/GRAPHICS/LISTEN'] || '0.0.0.0'
+                vnc_keymap = one_item['TEMPLATE/GRAPHICS/KEYMAP']
 
-                conf = [ {:key => "remotedisplay.vnc.enabled",:value => "TRUE"},
-                        {:key => "remotedisplay.vnc.port",   :value => vnc_port},
-                        {:key => "remotedisplay.vnc.ip",     :value => vnc_listen}]
+                conf =
+                    [
+                        {
+                            :key =>
+                                'remotedisplay.vnc.enabled',
+                            :value =>
+                                'TRUE'
+                        },
+                        {
+                            :key =>
+                                'remotedisplay.vnc.port',
+                            :value =>
+                                vnc_port
+                        },
+                        {
+                            :key =>
+                                'remotedisplay.vnc.ip',
+                            :value =>
+                                vnc_listen
+                        }
+                    ]
 
-                conf += [{:key => "remotedisplay.vnc.keymap",
-                                :value => vnc_keymap}] if vnc_keymap
+                conf +=
+                    [
+                        {
+                            :key =>
+                              'remotedisplay.vnc.keymap',
+                            :value =>
+                                vnc_keymap
+                        }
+                    ] if vnc_keymap
 
                 conf
             else
-                conf = []
+                []
             end
         end
 
         # Regenerate context when devices are hot plugged (reconfigure)
         def regenerate_context
-            spec_hash = { :extraConfig  => extraconfig_context }
+            spec_hash = { :extraConfig => extraconfig_context }
             spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
 
             begin
                 @item.ReconfigVM_Task(:spec => spec).wait_for_completion
-            rescue Exception => e
-                raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace}"
+            rescue StandardError => e
+                error = "Cannot create snapshot for VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace}"
+                end
+
+                raise error
             end
         end
 
         # Returns an array of actions to be included in :deviceChange
         def calculate_add_nic_spec(nic, unumber = nil)
-            mac     = nic["MAC"]
-            pg_name = nic["BRIDGE"]
-            default = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL')
+            mac     = nic['MAC']
+            pg_name = nic['BRIDGE']
+            default =
+                VCenterDriver::VIHelper
+                .get_default(
+                    'VM/TEMPLATE/NIC/MODEL'
+                )
             tmodel  = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']
 
+            # got the model of the nic, first try to get the model
+            # inside the nic, then the model defined by user and
+            # last option model by default in vCenter Driver
             model   = nic['MODEL'] || tmodel || default
             raise 'nic model cannot be empty!' if model == ''
 
-            vnet_ref  = nic["VCENTER_NET_REF"]
+            vnet_ref  = nic['VCENTER_NET_REF']
             backing   = nil
 
-            limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
-            limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
-            limit     = nil
+            # Maximum bitrate for the interface in kilobytes/second
+            # for inbound traffic
+            limit_in  =
+                nic['INBOUND_PEAK_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/INBOUND_PEAK_BW'
+                    )
+            # Maximum bitrate for the interface in kilobytes/second
+            # for outbound traffic
+            limit_out =
+                nic['OUTBOUND_PEAK_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW'
+                    )
+            limit = nil
 
             if limit_in && limit_out
                 limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
             end
 
-            rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
-            rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
-            rsrv     = nil
+            # Average bitrate for the interface in kilobytes/second
+            # for inbound traffic
+            rsrv_in  =
+                nic['INBOUND_AVG_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/INBOUND_AVG_BW'
+                    )
+            # Average bitrate for the interface in kilobytes/second
+            # for outbound traffic
+            rsrv_out =
+                nic['OUTBOUND_AVG_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/OUTBOUND_AVG_BW'
+                    )
+            rsrv = nil
 
             if rsrv_in || rsrv_out
                 rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
             end
 
-            network = self["runtime.host"].network.select do |n|
+            # get the network with ref equal to vnet_ref or
+            # with name equal to pg_name
+            network = self['runtime.host'].network.select do |n|
                 n._ref == vnet_ref || n.name == pg_name
             end
             network = network.first
 
-            raise "#{pg_name} not found in #{self['runtime.host'].name}" unless network
+            unless network
+                raise "#{pg_name} not found in #{self['runtime.host'].name}"
+            end
 
             # start in one, we want the next avaliable id
             card_num = 1
-            @item["config.hardware.device"].each do |dv|
+            @item['config.hardware.device'].each do |dv|
                 card_num += 1 if VCenterDriver::Network.nic?(dv)
             end
 
@@ -1530,27 +1909,38 @@ module VCenterDriver
 
             if network.class == RbVmomi::VIM::Network
                 backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
-                            :deviceName => pg_name,
-                            :network    => network)
+                    :deviceName => pg_name,
+                    :network    => network
+                )
             elsif network.class == RbVmomi::VIM::DistributedVirtualPortgroup
-                port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
-                            :switchUuid =>
-                                    network.config.distributedVirtualSwitch.uuid,
-                            :portgroupKey => network.key)
+                port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
+                    :switchUuid =>
+                            network.config.distributedVirtualSwitch.uuid,
+                    :portgroupKey => network.key
+                )
                 backing =
-                  RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
-                    :port => port)
+                    RbVmomi::VIM
+                    .VirtualEthernetCardDistributedVirtualPortBackingInfo(
+                        :port => port
+                    )
             elsif network.class == RbVmomi::VIM::OpaqueNetwork
-                backing = RbVmomi::VIM.VirtualEthernetCardOpaqueNetworkBackingInfo(
-                            :opaqueNetworkId => network.summary.opaqueNetworkId,
-                            :opaqueNetworkType => "nsx.LogicalSwitch")
+                backing =
+                    RbVmomi::VIM
+                    .VirtualEthernetCardOpaqueNetworkBackingInfo(
+                        :opaqueNetworkId =>
+                            network.summary.opaqueNetworkId,
+                        :opaqueNetworkType =>
+                            'nsx.LogicalSwitch'
+                    )
             else
-                raise "Unknown network class"
+                raise 'Unknown network class'
             end
 
             # grab the last unitNumber to ensure the nic to be added at the end
             if !unumber
-                @unic   = @unic || vcenter_nics_get.map{|d| d.unitNumber}.max || 0
+                @unic = @unic || vcenter_nics_list.map do |d|
+                    d.unitNumber
+                end.max || 0
                 unumber = @unic += 1
             else
                 @unic   = unumber
@@ -1559,7 +1949,7 @@ module VCenterDriver
             card_spec = {
                 :key => 0,
                 :deviceInfo => {
-                    :label => "net" + card_num.to_s,
+                    :label => 'net' + card_num.to_s,
                     :summary => pg_name
                 },
                 :backing     => backing,
@@ -1571,14 +1961,31 @@ module VCenterDriver
             if (limit || rsrv) && (limit > 0)
                 ra_spec = {}
                 rsrv = limit if rsrv > limit
+                # The bandwidth limit for the virtual network adapter. The
+                # utilization of the virtual network adapter will not exceed
+                # this limit, even if there are available resources. To clear
+                # the value of this property and revert it to unset, set the
+                # value to "-1" in an update operation. Units in Mbits/sec
                 ra_spec[:limit] = limit if limit
+                # Amount of network bandwidth that is guaranteed to the virtual
+                # network adapter. If utilization is less than reservation, the
+                # resource can be used by other virtual network adapters.
+                # Reservation is not allowed to exceed the value of limit if
+                # limit is set. Units in Mbits/sec
                 ra_spec[:reservation] = rsrv if rsrv
-                ra_spec[:share] =  RbVmomi::VIM.SharesInfo({
-                        :level => RbVmomi::VIM.SharesLevel("normal"),
-                        :shares => 0
-                    })
+                # Network share. The value is used as a relative weight in
+                # competing for shared bandwidth, in case of resource contention
+                ra_spec[:share] =
+                    RbVmomi::VIM.SharesInfo(
+                        {
+                            :level => RbVmomi::VIM.SharesLevel('normal'),
+                            :shares => 0
+                        }
+                    )
                 card_spec[:resourceAllocation] =
-                  RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
+                    RbVmomi::VIM.VirtualEthernetCardResourceAllocation(
+                        ra_spec
+                    )
             end
 
             {
@@ -1589,33 +1996,62 @@ module VCenterDriver
 
         # Returns an array of actions to be included in :deviceChange
         def calculate_add_nic_spec_autogenerate_mac(nic)
-            pg_name = nic["BRIDGE"]
-            model   = ''
-            default = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL')
-            tmodel  = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']
+            pg_name = nic['BRIDGE']
 
-            model   = nic['MODEL'] || tmodel || default
+            default =
+                VCenterDriver::VIHelper.get_default(
+                    'VM/TEMPLATE/NIC/MODEL'
+                )
+            tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']
 
-            vnet_ref  = nic["VCENTER_NET_REF"]
+            model = nic['MODEL'] || tmodel || default
+
+            vnet_ref  = nic['VCENTER_NET_REF']
             backing   = nil
 
-            limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
-            limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
-            limit     = nil
+            # Maximum bitrate for the interface in kilobytes/second
+            # for inbound traffic
+            limit_in  =
+                nic['INBOUND_PEAK_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/INBOUND_PEAK_BW'
+                    )
+            # Maximum bitrate for the interface in kilobytes/second
+            # for outbound traffic
+            limit_out =
+                nic['OUTBOUND_PEAK_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW'
+                    )
+            limit = nil
 
             if limit_in && limit_out
                 limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
             end
 
-            rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
-            rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
-            rsrv     = nil
+            # Average bitrate for the interface in kilobytes/second
+            # for inbound traffic
+            rsrv_in  =
+                nic['INBOUND_AVG_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/INBOUND_AVG_BW'
+                    )
+
+            # Average bitrate for the interface in kilobytes/second
+            # for outbound traffic
+            rsrv_out =
+                nic['OUTBOUND_AVG_BW'] ||
+                    VCenterDriver::VIHelper.get_default(
+                        'VM/TEMPLATE/NIC/OUTBOUND_AVG_BW'
+                    )
+
+            rsrv = nil
 
             if rsrv_in || rsrv_out
                 rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
             end
 
-            network = self["runtime.host"].network.select do |n|
+            network = self['runtime.host'].network.select do |n|
                 n._ref == vnet_ref || n.name == pg_name
             end
 
@@ -1623,7 +2059,7 @@ module VCenterDriver
 
             card_num = 1 # start in one, we want the next available id
 
-            @item["config.hardware.device"].each do |dv|
+            @item['config.hardware.device'].each do |dv|
                 card_num += 1 if VCenterDriver::Network.nic?(dv)
             end
 
@@ -1631,28 +2067,35 @@ module VCenterDriver
 
             if network.class == RbVmomi::VIM::Network
                 backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
-                            :deviceName => pg_name,
-                            :network    => network)
+                    :deviceName => pg_name,
+                    :network    => network
+                )
             elsif network.class == RbVmomi::VIM::DistributedVirtualPortgroup
-                port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
-                            :switchUuid =>
-                                    network.config.distributedVirtualSwitch.uuid,
-                            :portgroupKey => network.key)
+                port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
+                    :switchUuid =>
+                            network.config.distributedVirtualSwitch.uuid,
+                    :portgroupKey => network.key
+                )
                 backing =
-                  RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
-                    :port => port)
+                    RbVmomi::VIM
+                    .VirtualEthernetCardDistributedVirtualPortBackingInfo(
+                        :port => port
+                    )
             elsif network.class == RbVmomi::VIM::OpaqueNetwork
-                backing = RbVmomi::VIM.VirtualEthernetCardOpaqueNetworkBackingInfo(
-                            :opaqueNetworkId => network.summary.opaqueNetworkId,
-                            :opaqueNetworkType => "nsx.LogicalSwitch")
+                backing =
+                    RbVmomi::VIM
+                    .VirtualEthernetCardOpaqueNetworkBackingInfo(
+                        :opaqueNetworkId => network.summary.opaqueNetworkId,
+                        :opaqueNetworkType => 'nsx.LogicalSwitch'
+                    )
             else
-                raise "Unknown network class"
+                raise 'Unknown network class'
             end
 
             card_spec = {
                 :key => 0,
                 :deviceInfo => {
-                    :label => "net" + card_num.to_s,
+                    :label => 'net' + card_num.to_s,
                     :summary => pg_name
                 },
                 :backing     => backing,
@@ -1662,14 +2105,32 @@ module VCenterDriver
             if (limit || rsrv) && (limit > 0)
                 ra_spec = {}
                 rsrv = limit if rsrv > limit
+                # The bandwidth limit for the virtual network adapter. The
+                # utilization of the virtual network adapter will not exceed
+                # this limit, even if there are available resources. To clear
+                # the value of this property and revert it to unset, set the
+                # value to "-1" in an update operation. Units in Mbits/sec
                 ra_spec[:limit] = limit if limit
+                # Amount of network bandwidth that is guaranteed to the virtual
+                # network adapter. If utilization is less than reservation, the
+                # resource can be used by other virtual network adapters.
+                # Reservation is not allowed to exceed the value of limit if
+                # limit is set. Units in Mbits/sec
                 ra_spec[:reservation] = rsrv if rsrv
-                ra_spec[:share] =  RbVmomi::VIM.SharesInfo({
-                        :level => RbVmomi::VIM.SharesLevel("normal"),
-                        :shares => 0
-                    })
+                # Network share. The value is used as a relative weight in
+                # competing for shared bandwidth, in case of resource contention
+                ra_spec[:share] =
+                    RbVmomi::VIM.SharesInfo(
+                        {
+                            :level =>
+                                RbVmomi::VIM.SharesLevel(
+                                    'normal'
+                                ),
+                            :shares => 0
+                        }
+                    )
                 card_spec[:resourceAllocation] =
-                  RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
+                    RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
             end
 
             {
@@ -1686,16 +2147,22 @@ module VCenterDriver
                 # A new NIC requires a vcenter spec
                 attach_nic_array = []
                 attach_nic_array << calculate_add_nic_spec(one_nic)
-                spec_hash[:deviceChange] = attach_nic_array if !attach_nic_array.empty?
+                spec_hash[:deviceChange] =
+                    attach_nic_array unless attach_nic_array.empty?
 
                 # Reconfigure VM
                 spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
 
                 @item.ReconfigVM_Task(:spec => spec).wait_for_completion
-            rescue Exception => e
-                raise "Cannot attach NIC to VM: #{e.message}\n#{e.backtrace.join("\n")}"
-            end
+            rescue StandardError => e
+                error = "Cannot attach NIC to VM: #{e.message}."
 
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
+                end
+
+                raise error
+            end
         end
 
         # Detach NIC from VM
@@ -1708,12 +2175,19 @@ module VCenterDriver
 
             # Remove NIC from VM in the ReconfigVM_Task
             spec_hash[:deviceChange] = [
-                    :operation => :remove,
-                    :device => nic.vc_item ]
+                :operation => :remove,
+                :device => nic.vc_item
+            ]
             begin
                 @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
-            rescue Exception => e
-                raise "Cannot detach NIC from VM: #{e.message}\n#{e.backtrace.join("\n")}"
+            rescue StandardError => e
+                error = "Cannot detach NIC from VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
+                end
+
+                raise error
             end
         end
 
@@ -1723,7 +2197,10 @@ module VCenterDriver
             device_change = []
 
             nics_each(:exists?) do |nic|
-                device_change << {:operation => :remove, :device => nic.vc_item}
+                device_change << {
+                    :operation => :remove,
+                    :device => nic.vc_item
+                }
             end
 
             return if device_change.empty?
@@ -1733,64 +2210,75 @@ module VCenterDriver
 
             begin
                 @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
-            rescue Exception => e
-                raise "Cannot detach all NICs from VM: #{e.message}\n#{e.backtrace}"
+            rescue StandardError => e
+                error = "Cannot detach all NICs from VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace}"
+                end
+
+                raise error
             end
         end
 
         # try to get specs for new attached disks
         # using disk_each method with :no_exists? condition
-        def attach_disks_specs()
+        def attach_disks_specs
             attach_disk_array     = []
             extraconfig           = []
             attach_spod_array     = []
             attach_spod_disk_info = {}
 
-
-            pos = {:ide => 0, :scsi => 0}
+            pos = { :ide => 0, :scsi => 0 }
             disks_each(:no_exists?) do |disk|
-                k = disk.one_item['TYPE'] == 'CDROM' ? :ide : :scsi
+                disk.one_item['TYPE'] == 'CDROM' ? k = :ide : k = :scsi
 
                 if disk.storpod?
                     spec = calculate_add_disk_spec(disk.one_item, pos[k])
                     attach_spod_array << spec
-                    unit_ctrl = "#{spec[:device].controllerKey}-#{spec[:device].unitNumber}"
+
+                    controller_key = spec[:device].controllerKey
+                    unit_number = spec[:device].unitNumber
+
+                    unit_ctrl = "#{controller_key}-#{unit_number}"
                     attach_spod_disk_info[unit_ctrl] = disk.id
                 else
                     aspec = calculate_add_disk_spec(disk.one_item, pos[k])
-                    extra_key   = "opennebula.mdisk.#{disk.one_item["DISK_ID"]}"
-                    extra_value = "#{aspec[:device].key}"
+                    extra_key   = "opennebula.mdisk.#{disk.one_item['DISK_ID']}"
+                    extra_value = aspec[:device].key.to_s
 
                     attach_disk_array << aspec
-                    extraconfig << {key: extra_key, value: extra_value }
+                    extraconfig << { :key => extra_key, :value => extra_value }
                 end
 
                 pos[k]+=1
             end
 
-
-            { disks:       attach_disk_array,
-              spods:       attach_spod_array,
-              spod_info:   attach_spod_disk_info,
-              extraconfig: extraconfig
-            }
+            { :disks => attach_disk_array,
+              :spods => attach_spod_array,
+              :spod_info => attach_spod_disk_info,
+              :extraconfig => extraconfig }
         end
 
         # try to get specs for detached disks
         # using disk_each method with :dechaded? condition
-        def detach_disks_specs()
+        def detach_disks_specs
             detach_disk_array = []
             extra_config      = []
-            keys = disk_keys_get.invert
+            keys = disk_keys.invert
             ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
             disks_each(:detached?) do |d|
                 key = d.key.to_s
                 source = VCenterDriver::FileHelper.escape_path(d.path)
-                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)
+                persistent =
+                    VCenterDriver::VIHelper
+                    .find_persistent_image_by_source(
+                        source, ipool
+                    )
 
-                op = {operation: :remove, device: d.device}
-                if !persistent
-                    op[:fileOperation] = :destroy unless d.type == "CDROM"
+                op = { :operation => :remove, :device => d.device }
+                if !persistent && d.type != 'CDROM'
+                    op[:fileOperation] = :destroy
                 end
                 detach_disk_array << op
 
@@ -1798,7 +2286,7 @@ module VCenterDriver
                 extra_config << d.config(:delete) if keys[key]
             end
 
-            return detach_disk_array, extra_config
+            [detach_disk_array, extra_config]
         end
 
         def different_key?(change_disk, vc_disk)
@@ -1809,54 +2297,80 @@ module VCenterDriver
 
         def sync_extraconfig_disk(spec_hash)
             return if spec_hash[:deviceChange].empty?
+
             extraconfig_new = []
             # vCenter mob disks
-            vc_disks = @item["config.hardware.device"].select do |vc_device|
+            vc_disks = @item['config.hardware.device'].select do |vc_device|
                 disk?(vc_device)
             end
             return unless vc_disks
+
             # For each changed disk, compare with vcenter mob disk
-            spec_hash[:deviceChange].each_with_index do |device, index|
+            spec_hash[:deviceChange].each_with_index do |_device, index|
                 change_disk = spec_hash[:deviceChange][index]
                 vc_disks.each do |vc_disk|
-                    if different_key?(change_disk, vc_disk)
-                        extraconfig_new << {key: spec_hash[:extraConfig][index][:key],
-                                            value: vc_disk.key.to_s}
-                    end
+                    next unless different_key?(change_disk, vc_disk)
+
+                    extraconfig_new <<
+                        {
+                            :key =>
+                                spec_hash[:extraConfig][index][:key],
+                          :value =>
+                                vc_disk.key.to_s
+                        }
                 end
             end
-            unless extraconfig_new.empty?
-                spec_hash = {:extraConfig => extraconfig_new}
-                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
-                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
-            end
+
+            return if extraconfig_new.empty?
+
+            spec_hash = {
+                :extraConfig => extraconfig_new
+            }
+            spec =
+                RbVmomi::VIM
+                .VirtualMachineConfigSpec(
+                    spec_hash
+                )
+            @item.ReconfigVM_Task(
+                :spec => spec
+            ).wait_for_completion
         end
 
         # sync OpenNebula disk model with vCenter
         #
-        # @param option  [symbol]  if :all is provided the method will try to sync
-        # all the disks (detached and not existing ones) otherwishe it will only sync
+        # @param option  [symbol]  if :all is provided the
+        # method will try to sync
+        # all the disks (detached and not existing ones)
+        # otherwise it will only sync
         # the disks that are not existing
         #
-        # @param execute [boolean] indicates if the reconfigure operation is going to
+        # @param execute [boolean] indicates if the reconfigure operation
+        # is going to
         # be executed
         def sync_disks(option = :nil, execute = true)
             info_disks
 
-            spec_hash       = {}
-            device_change   = []
+            spec_hash = {}
 
             if option == :all
                 detach_op = {}
-                detach_op[:deviceChange], detach_op[:extraConfig] = detach_disks_specs
-                perform = !detach_op[:deviceChange].empty? || !detach_op[:extraConfig].empty?
-                @item.ReconfigVM_Task(:spec => detach_op).wait_for_completion if perform
+                detach_op[:deviceChange], detach_op[:extraConfig] =
+                    detach_disks_specs
+                perform =
+                    !detach_op[:deviceChange].empty? ||
+                        !detach_op[:extraConfig].empty?
+                @item
+                    .ReconfigVM_Task(
+                        :spec => detach_op
+                    ).wait_for_completion if perform
             end
 
             a_specs = attach_disks_specs
 
             if !a_specs[:spods].empty?
-                spec_hash[:extraConfig] = create_storagedrs_disks(a_specs[:spods], a_specs[:spod_info])
+                spec_hash[:extraConfig] =
+                    create_storagedrs_disks(a_specs[:spods],
+                                            a_specs[:spod_info])
             end
 
             if !a_specs[:disks].empty?
@@ -1877,85 +2391,131 @@ module VCenterDriver
             device_change = []
 
             # Extract unmanaged_keys
-            unmanaged_keys = disk_keys_get
+            unmanaged_keys = disk_keys
             vc_disks = vcenter_disks_get
 
             # Check if we're dealing with a StoragePod SYSTEM ds
-            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
+            storpod = disk['VCENTER_DS_REF'].start_with?('group-')
 
             # Check if disk being attached is already connected to the VM
-            raise "DISK is already connected to VM" if disk_attached_to_vm(disk, unmanaged_keys, vc_disks)
+            raise 'DISK is already connected to VM' if disk_attached_to_vm(
+                disk, unmanaged_keys, vc_disks
+            )
 
             # Generate vCenter spec and reconfigure VM
             add_spec = calculate_add_disk_spec(disk)
             device_change << add_spec
-            raise "Could not generate DISK spec" if device_change.empty?
+            raise 'Could not generate DISK spec' if device_change.empty?
 
-            extra_key   = "opennebula.mdisk.#{disk["DISK_ID"]}"
-            extra_value = "#{add_spec[:device].key}"
+            extra_key   = "opennebula.mdisk.#{disk['DISK_ID']}"
+            extra_value = add_spec[:device].key.to_s
 
             spec_hash[:deviceChange] = device_change
-            spec_hash[:extraConfig]  = [{key: extra_key, value: extra_value }]
+            spec_hash[:extraConfig]  =
+                [{ :key => extra_key, :value => extra_value }]
             spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
 
             begin
                 if storpod
-                    #Ask for StorageDRS recommendation to reconfigure VM (AddDisk)
-                    sm = get_sm
+                    # Ask for StorageDRS recommendation
+                    # to reconfigure VM (AddDisk)
+                    sm = storagemanager
 
-                    # Disk id is -1 as I don't know what disk id is going to be set
-                    disk_locator = [ RbVmomi::VIM.PodDiskLocator(diskId: -1) ]
+                    # Disk id is -1 as I don't know
+                    # what disk id is going to be set
+                    disk_locator = [RbVmomi::VIM.PodDiskLocator(:diskId => -1)]
 
                     # Disk locator is required for AddDisk
                     vmpod_hash = {}
                     vmpod_hash[:storagePod] = get_ds
                     vmpod_hash[:disk] = disk_locator
-                    vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)
+                    vmpod_config =
+                        RbVmomi::VIM::VmPodConfigForPlacement(
+                            vmpod_hash
+                        )
 
                     # The storage pod selection requires initialize
                     spod_hash = {}
-                    spod_hash[:initialVmConfig] = [ vmpod_config ]
-                    spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
+                    spod_hash[:initialVmConfig] = [vmpod_config]
+                    spod_select =
+                        RbVmomi::VIM::StorageDrsPodSelectionSpec(
+                            spod_hash
+                        )
                     storage_spec = RbVmomi::VIM.StoragePlacementSpec(
-                        type: :reconfigure,
-                        podSelectionSpec: spod_select,
-                        vm: self['_ref'],
-                        configSpec: spec
+                        :type => :reconfigure,
+                        :podSelectionSpec => spod_select,
+                        :vm => self['_ref'],
+                        :configSpec => spec
                     )
 
                     # Query a storage placement recommendation
-                    result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil
+                    result = sm
+                             .RecommendDatastores(
+                                 :storageSpec => storage_spec
+                             ) rescue nil
 
-                    raise "Could not get placement specification for StoragePod" if result.nil?
+                    if result.nil?
+                        raise 'Could not get placement '\
+                                'specification for StoragePod'
+                    end
 
-                    if !result.respond_to?(:recommendations) || result.recommendations.size == 0
-                        raise "Could not get placement specification for StoragePod"
+                    if !result.respond_to?(:recommendations) ||
+                        result.recommendations.empty?
+                        raise 'Could not get placement '\
+                                'specification for StoragePod'
                     end
 
                     # Get recommendation key to be applied
                     key = result.recommendations.first.key ||= ''
-                    raise "Missing Datastore recommendation for StoragePod" if key.empty?
+
+                    if key.empty?
+                        raise 'Missing Datastore recommendation for StoragePod'
+                    end
 
                     # Apply recommendation
-                    sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion
+                    sm.ApplyStorageDrsRecommendation_Task(
+                        :key => [key]
+                    ).wait_for_completion
 
-                    # Add the key for the volatile disk to the unmanaged opennebula.disk.id variables
-                    unit_number    = spec_hash[:deviceChange][0][:device].unitNumber
-                    controller_key = spec_hash[:deviceChange][0][:device].controllerKey
-                    key = get_vcenter_disk_key(unit_number, controller_key)
+                    # Add the key for the volatile disk to the
+                    # unmanaged opennebula.disk.id variables
+                    unit_number    =
+                        spec_hash[:deviceChange][0][:device]
+                        .unitNumber
+                    controller_key =
+                        spec_hash[:deviceChange][0][:device]
+                        .controllerKey
+                    key =
+                        get_vcenter_disk_key(
+                            unit_number,
+                            controller_key
+                        )
                     spec_hash = {}
                     reference = {}
-                    reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
+                    reference[:key] =
+                        "opennebula.disk.#{disk['DISK_ID']}"
                     reference[:value] = key.to_s
-                    spec_hash[:extraConfig] = [ reference ]
-                    @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
+                    spec_hash[:extraConfig] = [reference]
+                    @item
+                        .ReconfigVM_Task(
+                            :spec => spec_hash
+                        ).wait_for_completion
                 else
-                    @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+                    @item
+                        .ReconfigVM_Task(
+                            :spec => spec
+                        ).wait_for_completion
                 end
                 # Modify extraConfig if disks has a bad key
                 sync_extraconfig_disk(spec_hash)
-            rescue Exception => e
-                raise "Cannot attach DISK to VM: #{e.message}\n#{e.backtrace.join("\n")}"
+            rescue StandardError => e
+                error = "Cannot attach DISK to VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
+                end
+
+                raise error
             end
         end
 
@@ -1969,29 +2529,39 @@ module VCenterDriver
             end
 
             vm.config.hardware.device.each do |disk|
-                if disk_or_cdrom?(disk)
-                    # Let's try to find if disks is persistent
-                    source_unescaped = disk.backing.fileName.sub(/^\[(.*?)\] /, "") rescue next
-                    source = VCenterDriver::FileHelper.escape_path(source_unescaped)
+                next unless disk_or_cdrom?(disk)
 
-                    persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)
+                # Let's try to find if disks is persistent
+                source_unescaped = disk.backing.fileName.sub(
+                    /^\[(.*?)\] /, ''
+                ) rescue next
+                source = VCenterDriver::FileHelper.escape_path(source_unescaped)
 
-                    if persistent
-                        spec_hash[:deviceChange] << {
-                            :operation => :remove,
-                            :device => disk
-                        }
-                    end
-                end
+                persistent = VCenterDriver::VIHelper
+                             .find_persistent_image_by_source(
+                                 source, ipool
+                             )
 
+                next unless persistent
+
+                spec_hash[:deviceChange] << {
+                    :operation => :remove,
+                    :device => disk
+                }
             end
 
-            return nil if spec_hash[:deviceChange].empty?
+            return if spec_hash[:deviceChange].empty?
 
             begin
                 vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
-            rescue Exception => e
-                raise "Cannot detach all DISKs from VM: #{e.message}\n#{e.backtrace}"
+            rescue StandardError => e
+                error = "Cannot detach all DISKs from VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace}"
+                end
+
+                raise error
             end
         end
 
@@ -2007,38 +2577,45 @@ module VCenterDriver
 
             begin
                 @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
-            rescue Exception => e
-                raise "Cannot detach DISK from VM: #{e.message}\n#{e.backtrace}"\
-                    "Probably an existing VM snapshot includes that disk"
+            rescue StandardError => e
+                error = "Cannot detach DISK from VM: #{e.message}."
+                error += "\nProbably an existing VM snapshot includes that disk"
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace}"
+                end
+
+                raise error
             end
         end
 
         def destroy_disk(disk)
             one_vm = one_item
 
-            detachable= !(one_vm["LCM_STATE"].to_i == 11 && !disk.managed?)
-            detachable = detachable && disk.exists?
+            detachable = !(one_vm['LCM_STATE'].to_i == 11 && !disk.managed?)
+            detachable &&= disk.exists?
 
             return unless detachable
 
             detach_disk(disk)
-            disk.destroy()
+            disk.destroy
 
             @disks.delete(disk.id.to_s)
         end
 
         # Get vcenter device representing DISK object (hotplug)
         def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)
-            img_name = ""
+            img_name = ''
             device_found = nil
-            disk_id = disk["DISK_ID"]
+            disk_id = disk['DISK_ID']
             unmanaged_key = unmanaged_keys["opennebula.disk.#{disk_id}"]
 
             img_name_escaped = VCenterDriver::FileHelper.get_img_name(
-                                    disk,
-                                    one_item['ID'],
-                                    self['name'],
-                                    instantiated_as_persistent?)
+                disk,
+                one_item['ID'],
+                self['name'],
+                instantiated_as_persistent?
+            )
 
             img_name = VCenterDriver::FileHelper.unescape_path(img_name_escaped)
 
@@ -2052,17 +2629,17 @@ module VCenterDriver
                 end
             end
 
-            return device_found
+            device_found
         end
 
         def get_key(type)
-            @used_keys = [] unless @used_keys
+            @used_keys ||= []
 
-            if type == "CDROM"
-                bound = "cdrom?"
+            if type == 'CDROM'
+                bound = 'cdrom?'
                 key   = 3000
             else
-                bound = "disk?"
+                bound = 'disk?'
                 key   = 2000
             end
 
@@ -2070,11 +2647,13 @@ module VCenterDriver
             @item.config.hardware.device.each do |dev|
                 used << dev.key
                 next unless send(bound, dev)
+
                 key = dev.key
             end
 
             loop do
-                break if !used.include?(key)
+                break unless used.include?(key)
+
                 key+=1
             end
 
@@ -2083,22 +2662,24 @@ module VCenterDriver
             key
         end
 
-        def calculate_add_disk_spec(disk, position=0)
+        def calculate_add_disk_spec(disk, position = 0)
             img_name_escaped = VCenterDriver::FileHelper.get_img_name(
-                                    disk,
-                                    one_item['ID'],
-                                    self['name'],
-                                    instantiated_as_persistent?)
+                disk,
+                one_item['ID'],
+                self['name'],
+                instantiated_as_persistent?
+            )
 
             img_name = VCenterDriver::FileHelper.unescape_path(img_name_escaped)
 
-            type     = disk["TYPE"]
-            size_kb  = disk["SIZE"].to_i * 1024
+            type     = disk['TYPE']
+            size_kb  = disk['SIZE'].to_i * 1024
 
-            if type == "CDROM"
+            if type == 'CDROM'
                 # CDROM drive will be found in the IMAGE DS
-                ds_ref   = disk["VCENTER_DS_REF"]
-                ds       = VCenterDriver::Storage.new_from_ref(ds_ref, @vi_client)
+                ds_ref   = disk['VCENTER_DS_REF']
+                ds       = VCenterDriver::Storage.new_from_ref(ds_ref,
+                                                               @vi_client)
                 ds_name  = ds['name']
 
                 # CDROM can only be added when the VM is in poweroff state
@@ -2107,9 +2688,9 @@ module VCenterDriver
                     :fileName  => "[#{ds_name}] #{img_name}"
                 )
 
-                if @item["summary.runtime.powerState"] != "poweredOff"
-                    raise "The CDROM image can only be added as an IDE device "\
-                          "when the VM is in the powered off state"
+                if @item['summary.runtime.powerState'] != 'poweredOff'
+                    raise 'The CDROM image can only be added as an IDE device '\
+                          'when the VM is in the powered off state'
                 end
 
                 controller, unit_number = find_free_ide_controller(position)
@@ -2127,7 +2708,7 @@ module VCenterDriver
                     )
                 )
 
-                return {
+                {
                     :operation => :add,
                     :device => device
                 }
@@ -2142,98 +2723,107 @@ module VCenterDriver
                 else
                     controller, unit_number = find_free_controller(position)
                 end
-                storpod = disk["VCENTER_DS_REF"].start_with?('group-')
+                storpod = disk['VCENTER_DS_REF'].start_with?('group-')
                 if storpod
                     vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
-                      :diskMode  => 'persistent',
-                      :fileName  => ""
+                        :diskMode  => 'persistent',
+                        :fileName  => ''
                     )
                 else
                     ds = get_effective_ds(disk)
                     if ds.item._ref.start_with?('group-')
-                        ds_object = self.item.datastore.first
+                        ds_object = item.datastore.first
                         ds_name   = ds_object.name
                     else
                         ds_object = ds.item
                         ds_name = ds['name']
                     end
                     vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
-                      :datastore => ds_object,
-                      :diskMode  => 'persistent',
-                      :fileName  => "[#{ds_name}] #{img_name}"
+                        :datastore => ds_object,
+                        :diskMode  => 'persistent',
+                        :fileName  => "[#{ds_name}] #{img_name}"
                     )
                 end
 
                 device = RbVmomi::VIM::VirtualDisk(
-                  :backing       => vmdk_backing,
-                  :capacityInKB  => size_kb,
-                  :controllerKey => controller.key,
-                  :key           => get_key(type),
-                  :unitNumber    => unit_number
+                    :backing       => vmdk_backing,
+                    :capacityInKB  => size_kb,
+                    :controllerKey => controller.key,
+                    :key           => get_key(type),
+                    :unitNumber    => unit_number
                 )
 
                 config = {
-                  :operation => :add,
+                    :operation => :add,
                   :device    => device
                 }
 
                 # For StorageDRS vCenter must create the file
                 config[:fileOperation] = :create if storpod
 
-                return config
+                config
             end
         end
 
         # Remove the MAC addresses so they cannot be in conflict
         # with OpenNebula assigned mac addresses.
         # We detach all nics from the VM
-        def convert_to_template()
-                detach_all_nics
+        def convert_to_template
+            detach_all_nics
 
-                # We attach new NICs where the MAC address is assigned by vCenter
-                nic_specs = []
-                one_nics = one_item.retrieve_xmlelements("TEMPLATE/NIC")
-                one_nics.each do |nic|
-                    if (nic["OPENNEBULA_MANAGED"] && nic["OPENNEBULA_MANAGED"].upcase == "NO")
-                        nic_specs << calculate_add_nic_spec_autogenerate_mac(nic)
-                    end
-                end
+            # We attach new NICs where the MAC address is assigned by vCenter
+            nic_specs = []
+            one_nics = one_item.retrieve_xmlelements('TEMPLATE/NIC')
+            one_nics.each do |nic|
+                next unless nic['OPENNEBULA_MANAGED'] &&
+                    nic['OPENNEBULA_MANAGED'].upcase == 'NO'
 
-                # Reconfigure VM to add unmanaged nics
-                spec_hash = {}
-                spec_hash[:deviceChange] = nic_specs
-                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
-                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+                nic_specs <<
+                    calculate_add_nic_spec_autogenerate_mac(
+                        nic
+                    )
+            end
 
-                # Convert VM to template in vCenter
-                mark_as_template
+            # Reconfigure VM to add unmanaged nics
+            spec_hash = {}
+            spec_hash[:deviceChange] = nic_specs
+            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
+            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
 
-                # Edit the Opennebula template
-                one_client = OpenNebula::Client.new
-                template_id = one_item['TEMPLATE/TEMPLATE_ID']
-                new_template = OpenNebula::Template.new_with_id(template_id, one_client)
-                new_template.info
+            # Convert VM to template in vCenter
+            mark_as_template
 
-                # unlock VM Template
-                new_template.unlock()
+            # Edit the Opennebula template
+            one_client = OpenNebula::Client.new
+            template_id = one_item['TEMPLATE/TEMPLATE_ID']
+            new_template = OpenNebula::Template.new_with_id(template_id,
+                                                            one_client)
+            new_template.info
 
-                # Update the template reference
-                new_template.update("VCENTER_TEMPLATE_REF=#{@item._ref}", true)
+            # unlock VM Template
+            new_template.unlock
 
-                # Add vCenter template name
-                new_template.update("VCENTER_TEMPLATE_NAME=#{@item.name}", true)
+            # Update the template reference
+            new_template.update("VCENTER_TEMPLATE_REF=#{@item._ref}", true)
+
+            # Add vCenter template name
+            new_template.update("VCENTER_TEMPLATE_NAME=#{@item.name}", true)
         end
 
         def resize_unmanaged_disks
-            spec = {deviceChange: []}
+            spec = { :deviceChange => [] }
             disks_each(:one?) do |d|
                 next unless !d.managed? && d.new_size
+
                 spec[:deviceChange] << d.config(:resize)
             end
 
-            if !spec[:deviceChange].empty?
-                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
-            end
+            return if spec[:deviceChange].empty?
+
+            @item
+                .ReconfigVM_Task(
+                    :spec => spec
+                ).wait_for_completion
         end
 
         def resize_disk(disk)
@@ -2244,72 +2834,81 @@ module VCenterDriver
                 disk.change_size(size)
             end
 
-            spec = {deviceChange: [disk.config(:resize)]}
+            spec = { :deviceChange => [disk.config(:resize)] }
 
-            @item.ReconfigVM_Task(spec: spec).wait_for_completion
+            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
         end
 
         def snapshots?
-            self.clear('rootSnapshot')
+            clear('rootSnapshot')
             self['rootSnapshot'] && !self['rootSnapshot'].empty?
         end
 
         def instantiated_as_persistent?
             begin
-                !!one_item["TEMPLATE/CLONING_TEMPLATE_ID"]
-            rescue
-                return false #one_item may not be retrieved if deploy_id hasn't been set
+                !one_item['TEMPLATE/CLONING_TEMPLATE_ID'].nil?
+            rescue StandardError
+                # one_item may not be retrieved if deploy_id hasn't been set
+                false
             end
         end
 
         def use_linked_clone?
-            one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] && one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"].upcase == "YES"
+            one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES'] &&
+                one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
+                    .upcase == 'YES'
         end
 
-        def find_free_ide_controller(position=0)
+        def find_free_ide_controller(_position = 0)
             free_ide_controller = nil
-            ide_schema           = {}
-            devices           = @item.config.hardware.device
-            ideDeviceList = []
+            ide_schema = {}
+            devices = @item.config.hardware.device
 
-            # Iteration to initialize IDE Controllers
             devices.each do |dev|
-                if dev.is_a? RbVmomi::VIM::VirtualIDEController
-                    if ide_schema[dev.key].nil?
-                        ide_schema[dev.key] = {}
-                    end
-                    ide_schema[dev.key][:device] = dev
-                    ide_schema[dev.key][:freeUnitNumber] = [0,1]
+                # Iteration to initialize IDE Controllers
+                next unless dev.is_a? RbVmomi::VIM::VirtualIDEController
+
+                if ide_schema[dev.key].nil?
+                    ide_schema[dev.key] = {}
                 end
+                ide_schema[dev.key][:device] = dev
+                ide_schema[dev.key][:freeUnitNumber] = [0, 1]
             end
 
             # Iteration to match Disks and Cdroms with its controllers
-            devices.each do |dev|
-                if (dev.is_a? RbVmomi::VIM::VirtualDisk) || (dev.is_a? RbVmomi::VIM::VirtualCdrom)
-                    if ide_schema.key?(dev.controllerKey)
-                        ide_schema[dev.controllerKey][:freeUnitNumber].delete(dev.unitNumber)
-                    end
-                end
+            devices.each do |dev| # rubocop:disable Style/CombinableLoops
+                first_condition = dev.is_a? RbVmomi::VIM::VirtualDisk
+                second_condition = dev.is_a? RbVmomi::VIM::VirtualCdrom
+                third_condition = ide_schema.key?(dev.controllerKey)
+
+                next unless (first_condition || second_condition) &&
+                    third_condition
+
+                ide_schema[dev.controllerKey][:freeUnitNumber]
+                    .delete(
+                        dev.unitNumber
+                    )
             end
 
             ide_schema.keys.each do |controller|
-                if ide_schema[controller][:freeUnitNumber].length > 0
+                unless ide_schema[controller][:freeUnitNumber].empty?
                     free_ide_controller = ide_schema[controller]
                     break
                 end
             end
 
             if !free_ide_controller
-                raise "There are no free IDE controllers to connect this CDROM device"
+                raise 'There are no free IDE controllers ' +
+                          'to connect this CDROM device'
             end
 
             controller = free_ide_controller[:device]
             new_unit_number = free_ide_controller[:freeUnitNumber][0]
 
-            return controller, new_unit_number
+            [controller, new_unit_number]
         end
 
-        def find_free_controller(position=0)
+        def find_free_controller(position = 0)
             free_scsi_controllers = []
             scsi_schema           = {}
 
@@ -2328,18 +2927,21 @@ module VCenterDriver
                 end
 
                 next if dev.class != RbVmomi::VIM::VirtualDisk
+
                 used_numbers << dev.unitNumber
             end
 
             15.times do |scsi_id|
-                available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
+                available_numbers <<
+                    scsi_id if used_numbers.grep(scsi_id).length <= 0
             end
 
             scsi_schema.keys.each do |controller|
-                free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
+                free_scsi_controllers <<
+                    scsi_schema[controller][:device].deviceInfo.label
             end
 
-            if free_scsi_controllers.length > 0
+            if !free_scsi_controllers.empty?
                 available_controller_label = free_scsi_controllers[0]
             else
                 add_new_scsi(scsi_schema, devices)
@@ -2357,22 +2959,24 @@ module VCenterDriver
 
             new_unit_number = available_numbers.sort[position]
 
-            return controller, new_unit_number
+            [controller, new_unit_number]
         end
 
         def add_new_scsi(scsi_schema, devices)
             controller = nil
 
             if scsi_schema.keys.length >= 4
-                raise "Cannot add a new controller, maximum is 4."
+                raise 'Cannot add a new controller, maximum is 4.'
             end
 
             scsi_key    = 0
             scsi_number = 0
 
-            if scsi_schema.keys.length > 0 && scsi_schema.keys.length < 4
-                scsi_key    = scsi_schema.keys.sort[-1] + 1
-                scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
+            if !scsi_schema.keys.empty? && scsi_schema.keys.length < 4
+                scsi_key    =
+                    scsi_schema.keys.max + 1
+                scsi_number =
+                    scsi_schema[scsi_schema.keys.max][:device].busNumber + 1
             end
 
             controller_device = RbVmomi::VIM::VirtualLsiLogicController(
@@ -2393,14 +2997,13 @@ module VCenterDriver
             @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
 
             devices.each do |device|
-                if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
+            next unless device.class == RbVmomi::VIM::VirtualLsiLogicController &&
                     device.key == scsi_key
 
-                    controller = device.deviceInfo.label
-                end
+                controller = device.deviceInfo.label
             end
 
-            return controller
+            controller
         end
 
         # Create a snapshot for the VM
@@ -2415,78 +3018,77 @@ module VCenterDriver
                 :quiesce     => true
             }
 
-            vcenter_version = @vi_client.vim.serviceContent.about.apiVersion rescue nil
+            begin
+                @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
+            rescue StandardError => e
+                error = "Cannot create snapshot for VM: #{e.message}."
 
-            if vcenter_version != "5.5"
-                begin
-                    @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
-                rescue Exception => e
-                    raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace.join("\n")}"
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
                 end
-            else
-                # B#5045 - If vcenter is 5.5 the snapshot may take longer than
-                # 15 minutes and it does not report that it has finished using
-                # wait_for_completion so we use an active wait instead with a
-                # timeout of 1440 minutes = 24 hours
-                @item.CreateSnapshot_Task(snapshot_hash)
 
-                snapshot_created  = false
-                elapsed_minutes   = 0
-
-                until snapshot_created || elapsed_minutes == 1440
-                    if !!@item['snapshot']
-                        current_snapshot = @item['snapshot.currentSnapshot'] rescue nil
-                        snapshot_found = find_snapshot_in_list(@item['snapshot.rootSnapshotList'], snap_id)
-                        snapshot_created = !!snapshot_found && !!current_snapshot && current_snapshot._ref == snapshot_found._ref
-                    end
-                    sleep(60)
-                    elapsed_minutes += 1
-                end
+                raise error
             end
 
-            return snap_id
+            snap_id
         end
 
         # Revert to a VM snapshot
         def revert_snapshot(snap_id)
-
-            snapshot_list = self["snapshot.rootSnapshotList"]
+            snapshot_list = self['snapshot.rootSnapshotList']
             snapshot = find_snapshot_in_list(snapshot_list, snap_id)
 
-            return nil if !snapshot
+            return unless snapshot
 
             begin
                 revert_snapshot_hash = { :_this => snapshot }
-                snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
-            rescue Exception => e
-                raise "Cannot revert snapshot of VM: #{e.message}\n#{e.backtrace.join("\n")}"
+                snapshot
+                    .RevertToSnapshot_Task(
+                        revert_snapshot_hash
+                    ).wait_for_completion
+            rescue StandardError => e
+                error = "Cannot revert snapshot of VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
+                end
+
+                raise error
             end
         end
 
         # Delete VM snapshot
         def delete_snapshot(snap_id)
-
-            snapshot_list = self["snapshot.rootSnapshotList"]
+            snapshot_list = self['snapshot.rootSnapshotList']
             snapshot = find_snapshot_in_list(snapshot_list, snap_id)
 
-            return nil if !snapshot
+            return unless snapshot
 
             begin
                 delete_snapshot_hash = {
                     :_this => snapshot,
                     :removeChildren => false
                 }
-                snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
-            rescue Exception => e
-                raise "Cannot delete snapshot of VM: #{e.message}\n#{e.backtrace.join("\n")}"
+                snapshot
+                    .RemoveSnapshot_Task(
+                        delete_snapshot_hash
+                    ).wait_for_completion
+            rescue StandardError => e
+                error = "Cannot delete snapshot of VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
+                end
+
+                raise error
             end
         end
 
         def find_snapshot_in_list(list, snap_id)
             list.each do |i|
-                if i.name == snap_id.to_s
-                    return i.snapshot
-                elsif !i.childSnapshotList.empty?
+                return i.snapshot if i.name == snap_id.to_s
+
+                unless i.childSnapshotList.empty?
                     snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
                     return snap if snap
                 end
@@ -2496,7 +3098,9 @@ module VCenterDriver
         end
 
         def migrate(config = {})
-            raise "You need at least 1 parameter to perform a migration" if config.size == 0
+            if config.empty?
+                raise 'You need at least 1 parameter to perform a migration'
+            end
 
             begin
                 # retrieve host from DRS
@@ -2506,14 +3110,15 @@ module VCenterDriver
 
                 if datastore
                     relocate_spec_params = {
-                        pool:      resourcepool,
-                        datastore: datastore,
+                        :pool => resourcepool,
+                        :datastore => datastore
                     }
 
                     if config[:esx_migration_list].is_a?(String)
-                        if config[:esx_migration_list]==""
-                            relocate_spec_params[:host] = config[:cluster].item.host.sample
-                        elsif config[:esx_migration_list]!="Selected_by_DRS"
+                        if config[:esx_migration_list]==''
+                            relocate_spec_params[:host] =
+                                config[:cluster].item.host.sample
+                        elsif config[:esx_migration_list]!='Selected_by_DRS'
                             hostnames = config[:esx_migration_list].split(' ')
                             hostname = hostnames.sample
                             host_moref = one_cluster.hostname_to_moref(hostname)
@@ -2521,37 +3126,51 @@ module VCenterDriver
                         end
                     end
 
-
-                    relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)
-                    @item.RelocateVM_Task(spec: relocate_spec, priority: "defaultPriority").wait_for_completion
+                    relocate_spec =
+                        RbVmomi::VIM
+                        .VirtualMachineRelocateSpec(
+                            relocate_spec_params
+                        )
+                    @item.RelocateVM_Task(
+                        :spec => relocate_spec,
+                        :priority => 'defaultPriority'
+                    ).wait_for_completion
                 else
-                    @item.MigrateVM_Task(:pool=> resourcepool, :priority => "defaultPriority").wait_for_completion
+                    @item.MigrateVM_Task(
+                        :pool=> resourcepool,
+                        :priority => 'defaultPriority'
+                    ).wait_for_completion
+                end
+            rescue StandardError => e
+                error = "Cannot migrate VM: #{e.message}."
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace.join("\n")}"
                 end
 
-            rescue Exception => e
-                raise "Cannot migrate VM #{e.message}\n#{e.backtrace.join("\n")}"
+                raise error
             end
         end
 
-        ############################################################################
+        ########################################################################
         # actions
-        ############################################################################
+        ########################################################################
 
         def shutdown
-            if !powered_off?
-                begin
-                    if vm_tools?
-                        @item.ShutdownGuest
-                    else
-                        poweroff_hard
-                    end
-                rescue RbVmomi::Fault => e
-                    error = e.message.split(':').first
-                    raise e.message if error != 'InvalidPowerState'
+            return if powered_off?
+
+            begin
+                if vm_tools?
+                    @item.ShutdownGuest
+                else
+                    poweroff_hard
                 end
-                timeout = CONFIG[:vm_poweron_wait_default]
-                wait_timeout(:powered_off?, timeout)
+            rescue RbVmomi::Fault => e
+                error = e.message.split(':').first
+                raise e.message if error != 'InvalidPowerState'
             end
+            timeout = CONFIG[:vm_poweron_wait_default]
+            wait_timeout(:powered_off?, timeout)
         end
 
         def destroy
@@ -2589,11 +3208,11 @@ module VCenterDriver
         end
 
         def powered_on?
-            return @item.runtime.powerState == "poweredOn"
+            @item.runtime.powerState == 'poweredOn'
         end
 
         def powered_off?
-            return @item.runtime.powerState == "poweredOff"
+            @item.runtime.powerState == 'poweredOff'
         end
 
         def poweroff_hard
@@ -2601,7 +3220,10 @@ module VCenterDriver
         end
 
         def remove_all_snapshots(consolidate = true)
-            @item.RemoveAllSnapshots_Task({consolidate: consolidate}).wait_for_completion
+            @item
+                .RemoveAllSnapshots_Task(
+                    { :consolidate => consolidate }
+                ).wait_for_completion
             info_disks
         end
 
@@ -2610,10 +3232,10 @@ module VCenterDriver
         end
 
         def set_running(state, execute = true)
-            value = state ? "yes" : "no"
+            value = state ? 'yes' : 'no'
 
             config_array = [
-                { :key => "opennebula.vm.running", :value => value }
+                { :key => 'opennebula.vm.running', :value => value }
             ]
 
             return config_array unless execute
@@ -2626,36 +3248,48 @@ module VCenterDriver
         end
 
         # STATIC MEMBERS, ROUTINES AND CONSTRUCTORS
-        ###############################################################################################
+        ########################################################################
 
         def self.get_vm(opts = {})
             # try to retrieve machine from name
-            if (opts[:name])
-                    matches = opts[:name].match(/^one-(\d*)(-(.*))?$/)
-                    if matches
-                        id = matches[1]
-                        one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, id, false)
-                    end
+            if opts[:name]
+                matches = opts[:name].match(/^one-(\d*)(-(.*))?$/)
+                if matches
+                    id = matches[1]
+                    one_vm = VCenterDriver::VIHelper.one_item(
+                        OpenNebula::VirtualMachine, id, false
+                    )
+                end
             end
 
             if one_vm.nil?
-                one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
-                                                            "DEPLOY_ID",
-                                                            opts[:ref],
-                                                            opts[:vc_uuid],
-                                                            opts[:pool])
+                one_vm = VCenterDriver::VIHelper
+                         .find_by_ref(
+                             OpenNebula::VirtualMachinePool,
+                             'DEPLOY_ID',
+                             opts[:ref],
+                             opts[:vc_uuid],
+                             opts[:pool]
+                         )
             end
 
-            return one_vm
+            one_vm
         end
 
-        # Migrate a VM to another cluster and/or datastore
-        # @params [int] vm_id ID of the VM to be migrated
-        # params [String] src_host Name of the source cluster
-        # params [String] dst_host Name of the target cluster
-        # params [Bool] hot_ds Wether this is a DS migration with the VM running or not
-        # params [int] ds Destination datastore ID
-        def self.migrate_routine(vm_id, src_host, dst_host, hot_ds = false, ds = nil)
+        # Migrate a VM to another cluster and/or datastore
+        # @params [int] vm_id ID of the VM to be migrated
+        # @params [String] src_host Name of the source cluster
+        # @params [String] dst_host Name of the target cluster
+        # @params [Bool] hot_ds Wether this is a DS migration
+        #         with the VM running or not
+        # @params [int] ds Destination datastore ID
+        def self.migrate_routine(
+            vm_id,
+            src_host,
+            dst_host,
+            hot_ds = false,
+            ds = nil
+        )
             one_client = OpenNebula::Client.new
             pool = OpenNebula::HostPool.new(one_client)
             pool.info
@@ -2667,7 +3301,10 @@ module VCenterDriver
             if ds
                 ds_pool = OpenNebula::DatastorePool.new(one_client)
                 ds_pool.info
-                datastore = ds_pool["/DATASTORE_POOL/DATASTORE[ID='#{ds}']/TEMPLATE/VCENTER_DS_REF"]
+                vcenter_ds_red =
+                    "/DATASTORE_POOL/DATASTORE[ID='#{ds}']" +
+                        '/TEMPLATE/VCENTER_DS_REF'
+                datastore = ds_pool[vcenter_ds_red]
             end
 
             vi_client = VCenterDriver::VIClient.new_from_host(src_id)
@@ -2683,34 +3320,43 @@ module VCenterDriver
             esx_migration_list = dst_host['/HOST/TEMPLATE/ESX_MIGRATION_LIST']
 
             # required vcenter objects
-            vc_vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm['/VM/DEPLOY_ID'])
+            vc_vm = VCenterDriver::VirtualMachine
+                    .new_without_id(
+                        vi_client,
+                        vm['/VM/DEPLOY_ID']
+                    )
 
             vc_vm.vm_id = vm_id
 
             ccr_ref  = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF']
-            vc_host  = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
+            vc_host  = VCenterDriver::ClusterComputeResource.new_from_ref(
+                ccr_ref, vi_client
+            )
 
             config = { :cluster => vc_host }
 
             config[:datastore] = datastore if datastore
             if hot_ds
-                config[:esx_migration_list] = esx_migration_list if esx_migration_list
+                config[:esx_migration_list] =
+                    esx_migration_list if esx_migration_list
             else
-                config[:esx_migration_list] = "Selected_by_DRS"
+                config[:esx_migration_list] = 'Selected_by_DRS'
             end
 
             vc_vm.migrate(config)
 
-            vm.replace({ 'VCENTER_CCR_REF' => ccr_ref})
+            vm.replace({ 'VCENTER_CCR_REF' => ccr_ref })
         end
 
         # Try to build the vcenterdriver virtualmachine without
         # any opennebula id or object, this constructor can find
         # inside the opennebula pool until match
         #
-        # @param vi_client [vi_client] the vcenterdriver client that allows the connection
+        # @param vi_client [vi_client] the vcenterdriver client
+        #        that allows the connection
         # @param ref [String] vcenter ref to the vm
-        # @param opts [Hash] object with pairs that could contain multiple option
+        # @param opts [Hash] object with pairs that could
+        #        contain multiple options
         #        :vc_uuid: give the vcenter uuid directly
         #        :name:    the vcenter vm name for extract the opennebula id
         #
@@ -2725,23 +3371,22 @@ module VCenterDriver
 
             one_vm = VCenterDriver::VirtualMachine.get_vm(opts)
 
-            self.new_one(vi_client, ref, one_vm)
+            new_one(vi_client, ref, one_vm)
         end
 
         # build a vcenterdriver virtual machine from a template
         # this function is used to instantiate vcenter vms
         #
-        # @param vi_client [vi_client] the vcenterdriver client that allows the connection
+        # @param vi_client [vi_client] the vcenterdriver
+        #        client that allows the connection
         # @param drv_action [xmlelement] driver_action that contains the info
         # @param id [int] the if of the opennebula virtual machine
         #
         # @return [vcenterdriver::vm] the virtual machine
-        def self.new_from_clone(vi_client, drv_action, id )
-            spawn = self.new(vi_client, nil, id).tap do |vm|
+        def self.new_from_clone(vi_client, drv_action, id)
+            new(vi_client, nil, id).tap do |vm|
                 vm.clone_vm(drv_action)
             end
-
-            return spawn
         end
 
         # build a vcenterdriver virtual machine
@@ -2751,23 +3396,24 @@ module VCenterDriver
         #
         # @return [vcenterdriver::vm] the virtual machine
         def self.new_with_item(vm_item)
-            self.new(nil, nil, -1).tap do |vm|
-                vm.set_item(vm_item)
+            new(nil, nil, -1).tap do |vm|
+                vm.item_update(vm_item)
             end
         end
 
         # build a vcenterdriver virtual machine
         # with the opennebula object linked
         #
-        # @param vi_client [vi_client] the vcenterdriver client that allows the connection
+        # @param vi_client [vi_client] the vcenterdriver
+        #        client that allows the connection
         # @param ref [String] vcenter ref to the vm
         # @param one_item [one::vm] xmlelement of opennebula
         #
         # @return [vcenterdriver::vm] the virtual machine
         def self.new_one(vi_client, ref, one_item)
-            id = one_item["ID"] || one_item["VM/ID"] rescue -1
+            id = one_item['ID'] || one_item['VM/ID'] rescue -1
 
-            self.new(vi_client, ref, id).tap do |vm|
+            new(vi_client, ref, id).tap do |vm|
                 if one_item.instance_of?(OpenNebula::VirtualMachine)
                     vm.one_item = one_item
                 end
@@ -2777,15 +3423,19 @@ module VCenterDriver
         # build a vcenterdriver virtual machine
         # without opennebula object link, use id = -1 instead
         #
-        # @param vi_client [vi_client] the vcenterdriver client that allows the connection
+        # @param vi_client [vi_client] the vcenterdriver client
+        #        that allows the connection
         # @param ref [String] vcenter ref to the vm
         #
         # @return [vcenterdriver::vm] the virtual machine
         def self.new_without_id(vi_client, ref)
-            self.new(vi_client, ref, -1)
+            new(vi_client, ref, -1)
         end
 
-        ###############################################################################################
-    end # class VirtualMachine
+        ########################################################################
 
-end # module VCenterDriver
+    end
+    # class VirtualMachine
+
+end
+# module VCenterDriver
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
index bfc9fc7f0a..b0fe75c5e4 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
@@ -49,9 +49,10 @@ module VCenterDriver
             return unless @locking
 
             @locking_file.close
-            if File.exist?('/tmp/vcenter-importer-lock')
-                File.delete('/tmp/vcenter-importer-lock')
-            end
+
+            return unless File.exist?('/tmp/vcenter-importer-lock')
+
+            File.delete('/tmp/vcenter-importer-lock')
         end
 
         def vm?
@@ -64,7 +65,7 @@ module VCenterDriver
             !@item['guest.net'].empty?
         end
 
-        def get_dc
+        def datacenter
             item = @item
 
             trace = []
@@ -102,28 +103,40 @@ module VCenterDriver
             template_name = "one-#{self['name']}" if template_name.empty?
 
             relocate_spec_params = {}
-            relocate_spec_params[:pool] = rp_get
-            relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)
+            relocate_spec_params[:pool] = resource_pool
+            relocate_spec =
+                RbVmomi::VIM
+                .VirtualMachineRelocateSpec(
+                    relocate_spec_params
+                )
 
-            clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
-                                                                  :location => relocate_spec,
-                :powerOn  => false,
-                :template => false
-                                                              })
+            clone_spec =
+                RbVmomi::VIM
+                .VirtualMachineCloneSpec(
+                    {
+                        :location => relocate_spec,
+                        :powerOn  => false,
+                        :template => false
+                    }
+                )
 
-            template = nil
             begin
-                template = @item.CloneVM_Task(:folder => @item.parent,
-                                              :name   => template_name,
-                                              :spec   => clone_spec).wait_for_completion
+                template =
+                    @item
+                    .CloneVM_Task(
+                        :folder => @item.parent,
+                        :name   => template_name,
+                        :spec   => clone_spec
+                    ).wait_for_completion
                 template_ref = template._ref
             rescue StandardError => e
                 if !e.message.start_with?('DuplicateName')
-                    error = "Could not create the template clone. Reason: #{e.message}"
+                    error = 'Could not create the template'\
+                            " clone. Reason: #{e.message}"
                     return error, nil
                 end
 
-                dc = get_dc
+                dc = datacenter
                 vm_folder = dc.vm_folder
                 vm_folder.fetch!
                 vm = vm_folder.items
@@ -133,15 +146,22 @@ module VCenterDriver
                 if vm
                     begin
                         vm.Destroy_Task.wait_for_completion
-                        template = @item.CloneVM_Task(:folder => @item.parent,
-                                                      :name   => template_name,
-                                                      :spec   => clone_spec).wait_for_completion
+                        template =
+                            @item
+                            .CloneVM_Task(
+                                :folder => @item.parent,
+                                :name   => template_name,
+                                :spec   => clone_spec
+                            ).wait_for_completion
                         template_ref = template._ref
                     rescue StandardError
-                        error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}"
+                        error = 'Could not delete the existing '\
+                                'template, please remove it manually'\
+                                " from vCenter. Reason: #{e.message}"
                     end
                 else
-                    error = "Could not create the template clone. Reason: #{e.message}"
+                    error = 'Could not create the template '\
+                            "clone. Reason: #{e.message}"
                 end
             end
 
@@ -151,8 +171,12 @@ module VCenterDriver
         # Linked Clone over existing template
         def create_delta_disks
             begin
-                disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
-                disk_without_snapshots = disks.select {|x| x.backing.parent.nil? }
+                disks =
+                    @item['config.hardware.device']
+                    .grep(RbVmomi::VIM::VirtualDisk)
+                disk_without_snapshots = disks.select do |x|
+                    x.backing.parent.nil?
+                end
             rescue StandardError
                 error = 'Cannot extract existing disks on template.'
                 use_linked_clones = false
@@ -163,11 +187,16 @@ module VCenterDriver
 
                 begin
                     if self['config.template']
-                        @item.MarkAsVirtualMachine(:pool => rp_get, :host => self['runtime.host'])
+                        @item.MarkAsVirtualMachine(
+                            :pool => resource_pool,
+                            :host => self['runtime.host']
+                        )
                     end
                 rescue StandardError => e
                     @item.MarkAsTemplate()
-                    error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
+                    error = 'Cannot mark the template as a VirtualMachine. '\
+                            'Not using linked clones. '\
+                            "Reason: #{e.message}/#{e.backtrace}"
                     use_linked_clones = false
                     return error, use_linked_clones
                 end
@@ -177,48 +206,71 @@ module VCenterDriver
                     spec[:deviceChange] = []
 
                     disk_without_snapshots.each do |disk|
-                        remove_disk_spec = { :operation => :remove, :device => disk }
+                        remove_disk_spec =
+                            {
+                                :operation => :remove,
+                                :device => disk
+                            }
                         spec[:deviceChange] << remove_disk_spec
 
-                        add_disk_spec = { :operation => :add,
-                                        :fileOperation => :create,
-                                        :device => disk.dup.tap do |x|
-                                            x.backing = x.backing.dup
-                                            x.backing.fileName = "[#{disk.backing.datastore.name}]"
-                                            x.backing.parent = disk.backing
-                                        end }
+                        add_disk_spec =
+                            {
+                                :operation => :add,
+                                :fileOperation => :create,
+                                :device => disk.dup.tap do |x|
+                                    x.backing =
+                                        x.backing.dup
+                                    x.backing.fileName =
+                                        "[#{disk.backing.datastore.name}]"
+                                    x.backing.parent =
+                                        disk.backing
+                                end
+                            }
                         spec[:deviceChange] << add_disk_spec
                     end
 
-                    @item.ReconfigVM_Task(:spec => spec).wait_for_completion unless spec[:deviceChange].empty?
+                    @item
+                        .ReconfigVM_Task(
+                            :spec => spec
+                        ).wait_for_completion unless spec[:deviceChange].empty?
                 rescue StandardError => e
-                    error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
+                    error = 'Cannot create the delta disks on top '\
+                            "of the template. Reason: #{e.message}."
+
+                    if VCenterDriver::CONFIG[:debug_information]
+                        error += "\n\n#{e.backtrace}"
+                    end
+
                     use_linked_clones = false
                     return error, use_linked_clones
                 end
 
                 begin
                     @item.MarkAsTemplate()
-                rescue StandardError
-                    error = 'Cannot mark the VirtualMachine as a template. Not using linked clones.'
+                rescue StandardError => e
+                    error = 'Cannot mark the VirtualMachine as '\
+                            'a template. Not using linked clones.' \
+                            " Reason: #{e.message}."
+
+                    if VCenterDriver::CONFIG[:debug_information]
+                        error += "\n\n#{e.backtrace}"
+                    end
+
                     use_linked_clones = false
                     return error, use_linked_clones
                 end
-
-                error = nil
-                use_linked_clones = true
-                [error, use_linked_clones]
-            else
-                # Template already has delta disks
-                error = nil
-                use_linked_clones = true
-                [error, use_linked_clones]
             end
+
+            error = nil
+            use_linked_clones = true
+
+            [error, use_linked_clones]
         end
 
         ########################################################################
         # Import vcenter disks
-        # @param type [object] contains the type of the object(:object) and identifier(:id)
+        # @param type [object] contains the type of the object(:object) and
+        # identifier(:id)
         # @return error, template_disks
         ########################################################################
         def import_vcenter_disks(vc_uuid, dpool, ipool, type)
@@ -227,10 +279,10 @@ module VCenterDriver
             images = []
 
             begin
-                lock # Lock import operation, to avoid concurrent creation of images
+                # Lock import operation, to avoid concurrent creation of images
+                lock
 
-                # #ccr_ref = self["runtime.host.parent._ref"]
-                dc = get_dc
+                dc = datacenter
                 dc_ref = dc.item._ref
 
                 # Get disks and info required
@@ -245,14 +297,24 @@ module VCenterDriver
                     begin
                         ds_ref = disk[:datastore]._ref
                     rescue StandardError
-                        raise "The ISO #{disk[:path_wo_ds].name} cannot be found because the datastore was removed or deleted"
+                        raise "The ISO #{disk[:path_wo_ds].name} cannot "\
+                              'be found because the datastore was '\
+                              'removed or deleted'
                     end
-                    datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(ds_ref,
-                                                                                            dc_ref,
-                                                                                            vc_uuid,
-                                                                                            dpool)
+                    datastore_found =
+                        VCenterDriver::Storage
+                        .get_one_image_ds_by_ref_and_dc(
+                            ds_ref,
+                            dc_ref,
+                            vc_uuid,
+                            dpool
+                        )
+
                     if datastore_found.nil?
-                        error = "\n    ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n"
+                        error = "\n    ERROR: datastore "\
+                                "#{disk[:datastore].name}: "\
+                                'has to be imported first as'\
+                                " an image datastore!\n"
 
                         # Rollback delete disk images
                         allocated_images.each do |i|
@@ -273,13 +335,18 @@ module VCenterDriver
                         :images => images
                     }
 
-                    image_import, image_name = VCenterDriver::Datastore.get_image_import_template(params)
+                    image_import, image_name =
+                        VCenterDriver::Datastore
+                        .get_image_import_template(
+                            params
+                        )
                     # Image is already in the datastore
                     if image_import[:one]
                         # This is the disk info
                         disk_tmp = ''
                         disk_tmp << "DISK=[\n"
-                        disk_tmp << "IMAGE_ID=\"#{image_import[:one]['ID']}\",\n"
+                        disk_tmp <<
+                            "IMAGE_ID=\"#{image_import[:one]['ID']}\",\n"
                         disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                         disk_tmp << "]\n"
                         disk_info << disk_tmp
@@ -287,12 +354,18 @@ module VCenterDriver
                     elsif !image_import[:template].empty?
 
                         # Then the image is created as it's not in the datastore
-                        one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
+                        one_i =
+                            VCenterDriver::VIHelper
+                            .new_one_item(
+                                OpenNebula::Image
+                            )
                         allocated_images << one_i
-                        rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i, false)
+                        rc = one_i.allocate(image_import[:template],
+                                            datastore_found['ID'].to_i, false)
 
                         if OpenNebula.is_error?(rc)
-                            error = "    Error creating disk from template: #{rc.message}\n"
+                            error = '    Error creating disk from '\
+                                    "template: #{rc.message}\n"
                             break
                         end
 
@@ -300,7 +373,10 @@ module VCenterDriver
                         one_i.info
                         start_time = Time.now
 
-                        while (one_i.state_str != 'READY') && (Time.now - start_time < 300)
+                        first_condition = one_i.state_str != 'READY'
+                        second_condition = Time.now - start_time < 300
+
+                        while first_condition && second_condition
                             sleep 1
                             one_i.info
                         end
@@ -316,7 +392,13 @@ module VCenterDriver
                     end
                 end
             rescue StandardError => e
-                error = "\n    There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}"
+                error = "\n    There was an error trying to create an "\
+                        'image for disk in vcenter template. '\
+                        "Reason: #{e.message}"
+
+                if VCenterDriver::CONFIG[:debug_information]
+                    error += "\n\n#{e.backtrace}"
+                end
             ensure
                 unlock
                 if !error.empty? && allocated_images
@@ -330,64 +412,71 @@ module VCenterDriver
             [error, disk_info, allocated_images]
         end
 
+        ########################################################################
+        # Create AR
+        # @param nic [object] contains properties of the nic
+        # @param with_id [Boolean] determine if AR will contains AR_ID
+        # @param ipv4 [string] create the AR with a IPv4 address
+        # @param ipv6 [string] create the AR with a IPv6 address
+        #
+        # * in case of IPv6 we use a standard PREFIX_LENGTH = 64
+        # * if we pass ipv4 we force nic use that IPv4
+        # * if we pass ipv6 we force nic use that IPv6
+        # @return ar_tmp
+        ########################################################################
         def create_ar(nic, with_id = false, ipv4 = nil, ipv6 = nil)
-            ar_tmp = ''
+            ar_tmp = "AR=[\n"
+
+            # if ipv4 and ipv6 are defined create a IPv4 address with a static
+            # IPv6 address
             if ipv4 && ipv6
-                ar_tmp << "AR=[\n"
                 ar_tmp << "TYPE=\"IP4_6_STATIC\",\n"
                 ar_tmp << "IP=\"#{ipv4}\",\n"
                 ar_tmp << "IP6=\"#{ipv6}\",\n"
                 ar_tmp << "PREFIX_LENGTH=\"64\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
+            # if just ipv4 is defined create an AR with just an IPv4 address
             elsif ipv4
-                ar_tmp << "AR=[\n"
                 ar_tmp << "TYPE=\"IP4\",\n"
                 ar_tmp << "IP=\"#{ipv4}\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
+            # if just ipv6 is defined create an AR with just an IPv6 address
             elsif ipv6
-                ar_tmp << "AR=[\n"
                 ar_tmp << "TYPE=\"IP6_STATIC\",\n"
                 ar_tmp << "IP6=\"#{ipv6}\",\n"
                 ar_tmp << "PREFIX_LENGTH=\"64\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
+            # in case the nic has mac, ipv4 and ipv6 defined, create an AR
+            # with this configuration
             elsif nic[:mac] && nic[:ipv4] && nic[:ipv6]
-                ar_tmp << "AR=[\n"
                 ar_tmp << "AR_ID=0,\n" if with_id
                 ar_tmp << "TYPE=\"IP4_6_STATIC\",\n"
                 ar_tmp << "IP=\"#{nic[:ipv4]}\",\n"
                 ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
                 ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n"
                 ar_tmp << "PREFIX_LENGTH=\"64\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
+            # in case the nic has mac and ipv6 defined, create an AR
+            # with this configuration
             elsif nic[:mac] && nic[:ipv6]
-                ar_tmp << "AR=[\n"
                 ar_tmp << "AR_ID=0,\n" if with_id
                 ar_tmp << "TYPE=\"IP6_STATIC\",\n"
                 ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
                 ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n"
                 ar_tmp << "PREFIX_LENGTH=\"64\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
+            # in case the nic has mac and ipv4 defined, create an AR
+            # with this configuration
             elsif nic[:mac] && nic[:ipv4]
-                ar_tmp << "AR=[\n"
                 ar_tmp << "AR_ID=0,\n" if with_id
                 ar_tmp << "TYPE=\"IP4\",\n"
                 ar_tmp << "IP=\"#{nic[:ipv4]}\",\n"
                 ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
+            # in case the nic does not have any default configuration create ETHER
             else
-                ar_tmp << "AR=[\n"
                 ar_tmp << "AR_ID=0,\n" if with_id
                 ar_tmp << "TYPE=\"ETHER\",\n"
                 ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
-                ar_tmp << "SIZE=\"1\"\n"
-                ar_tmp << "]\n"
             end
+
+            ar_tmp << "SIZE=\"1\"\n"
+            ar_tmp << "]\n"
+
             ar_tmp
         end
 
@@ -396,20 +485,23 @@ module VCenterDriver
             ars_new = network_found.to_hash['VNET']['AR_POOL']['AR']
             ars_new = [ars_new] if ars_new.class.to_s.eql? 'Hash'
             last_id = ars_new.last['AR_ID']
-            if ar_ids.has_key?(nic[:net_ref])
+            if ar_ids.key?(nic[:net_ref])
                 ref = nic[:net_ref]
                 value = ar_ids[ref.to_s]
-                value.insert(value.length, last_id)
-                ar_ids.store(nic[:net_ref], value)
-            else
-                value.insert(value.length, last_id)
-                ar_ids.store(nic[:net_ref], value)
             end
 
+            value.insert(value.length, last_id)
+            ar_ids.store(nic[:net_ref], value)
+
             last_id
         end
 
-        def find_alias_ips_in_network(network, vm_object, alias_ipv4 = nil, alias_ipv6 = nil)
+        def find_alias_ips_in_network(
+            network,
+            vm_object,
+            alias_ipv4 = nil,
+            alias_ipv6 = nil
+        )
             ipv4 = ipv6 = ''
             return unless vm_object.is_a?(VCenterDriver::VirtualMachine)
 
@@ -436,7 +528,13 @@ module VCenterDriver
             [ipv4, ipv6]
         end
 
-        def find_ips_in_network(network, vm_object, nic, force = false, first_ip = false)
+        def find_ips_in_network(
+            network,
+            vm_object,
+            nic,
+            force = false,
+            first_ip = false
+        )
             ipv4 = ipv6 = ''
             ar_id = -1
             return unless vm_object.is_a?(VCenterDriver::VirtualMachine)
@@ -462,7 +560,8 @@ module VCenterDriver
 
                         ar_array = network.to_hash['VNET']['AR_POOL']['AR']
                         ar_array = [ar_array] if ar_array.is_a?(Hash)
-                        ipv4, ipv6, ar_id = find_ip_in_ar(ip, ar_array) if ar_array
+                        ipv4, ipv6, ar_id = find_ip_in_ar(ip,
+                                                          ar_array) if ar_array
 
                         if first_ip
                             return [ipv4, ipv6, ar_id]
@@ -480,34 +579,46 @@ module VCenterDriver
             ipv4 = ipv6 = ''
             ar_id = -1
             ar_array.each do |ar|
-                next unless ((ar.key?('IP') && ar.key?('IP_END')) || (ar.key?('IP6') && ar.key?('IP6_END')))
+                first_condition = ar.key?('IP') && ar.key?('IP_END')
+                second_condition = ar.key?('IP6') && ar.key?('IP6_END')
 
-                start_ip = IPAddr.new(ar['IP']) if !ar['IP'].nil?
-                end_ip = IPAddr.new(ar['IP_END']) if !ar['IP_END'].nil?
-                start_ip = IPAddr.new(ar['IP6']) if !ar['IP6'].nil?
-                end_ip = IPAddr.new(ar['IP6_END']) if !ar['IP6_END'].nil?
+                next unless first_condition || second_condition
+
+                start_ip = IPAddr.new(ar['IP']) unless ar['IP'].nil?
+                end_ip = IPAddr.new(ar['IP_END']) unless ar['IP_END'].nil?
+                start_ip = IPAddr.new(ar['IP6']) unless ar['IP6'].nil?
+                end_ip = IPAddr.new(ar['IP6_END']) unless ar['IP6_END'].nil?
 
                 next unless ip.family == start_ip.family &&
                             ip.family == end_ip.family
 
-                if ip >= start_ip && ip <= end_ip
-                    ipv4 = ip.to_s if ip.ipv4?
-                    ipv6 = ip.to_s if ip.ipv6?
-                    ar_id = ar['ID']
-                end
+                next unless ip >= start_ip && ip <= end_ip
+
+                ipv4 = ip.to_s if ip.ipv4?
+                ipv6 = ip.to_s if ip.ipv6?
+                ar_id = ar['ID']
             end
             [ipv4, ipv6, ar_id]
         end
 
         def nic_alias_from_nic(id, nic, nic_index, network_found, vm_object)
-            nic_tmp = ""
+            nic_tmp = ''
 
             nic_alias_index = 1
             if nic[:ipv4_additionals]
-                nic[:ipv4_additionals].split(",").each do |ipv4_additional|
-                    ipv4, ipv6 = find_alias_ips_in_network(network_found, vm_object, alias_ipv4 = ipv4_additional)
+                nic[:ipv4_additionals].split(',').each do |ipv4_additional|
+                    ipv4, ipv6 =
+                        find_alias_ips_in_network(
+                            network_found,
+                            vm_object,
+                            ipv4_additional
+                        )
                     if ipv4.empty? && ipv6.empty?
-                        ar_tmp = create_ar(nic, with_id = false, ipv4 = ipv4_additional)
+                        ar_tmp = create_ar(
+                            nic,
+                            false,
+                            ipv4_additional
+                        )
                         network_found.add_ar(ar_tmp)
                     end
                     network_found.info
@@ -515,17 +626,27 @@ module VCenterDriver
                     nic_tmp << "NIC_ALIAS=[\n"
                     nic_tmp << "NETWORK_ID=\"#{id}\",\n"
                     nic_tmp << "IP=\"#{ipv4_additional}\",\n"
-                    nic_tmp << "NAME=\"NIC#{nic_index}_ALIAS#{nic_alias_index}\",\n"
+                    nic_tmp <<
+                        "NAME=\"NIC#{nic_index}_ALIAS#{nic_alias_index}\",\n"
                     nic_tmp << "PARENT=\"NIC#{nic_index}\"\n"
                     nic_tmp << "]\n"
                     nic_alias_index += 1
                 end
             end
             if nic[:ipv6_additionals]
-                nic[:ipv6_additionals].split(",").each do |ipv6_additional|
-                    ipv4, ipv6 = find_alias_ips_in_network(network_found, vm_object, alias_ipv6 = ipv6_additional)
+                nic[:ipv6_additionals].split(',').each do |ipv6_additional|
+                    ipv4, ipv6 = find_alias_ips_in_network(
+                        network_found,
+                        vm_object,
+                        ipv6_additional
+                    )
                     if ipv4.empty? && ipv6.empty?
-                        ar_tmp = create_ar(nic, with_id = false, ipv4 = nil, ipv6 = ipv6_additional)
+                        ar_tmp = create_ar(
+                            nic,
+                            false,
+                            nil,
+                            ipv6_additional
+                        )
                         network_found.add_ar(ar_tmp)
                     end
                     network_found.info
@@ -533,7 +654,8 @@ module VCenterDriver
                     nic_tmp << "NIC_ALIAS=[\n"
                     nic_tmp << "NETWORK_ID=\"#{id}\",\n"
                     nic_tmp << "IP6=\"#{ipv6_additional}\",\n"
-                    nic_tmp << "NAME=\"NIC#{nic_index}_ALIAS#{nic_alias_index}\",\n"
+                    nic_tmp <<
+                        "NAME=\"NIC#{nic_index}_ALIAS#{nic_alias_index}\",\n"
                     nic_tmp << "PARENT=\"NIC#{nic_index}\"\n"
                     nic_tmp << "]\n"
                     nic_alias_index += 1
@@ -543,7 +665,7 @@ module VCenterDriver
             nic_tmp
         end
 
-        def nic_from_network_created(one_vn, nic, nic_index, vm_object, ar_ids)
+        def nic_from_network_created(one_vn, nic, nic_index, vm_object, _ar_ids)
             nic_tmp = "NIC=[\n"
             nic_tmp << "NETWORK_ID=\"#{one_vn.id}\",\n"
             nic_tmp << "NAME =\"NIC#{nic_index}\",\n"
@@ -554,7 +676,8 @@ module VCenterDriver
                 end
                 if nic[:ipv4_additionals]
                     nic_tmp <<
-                        "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n"
+                        'VCENTER_ADDITIONALS_IP4'\
+                        "=\"#{nic[:ipv4_additionals]}\",\n"
                 end
                 if nic[:ipv6]
                     nic_tmp <<
@@ -570,7 +693,8 @@ module VCenterDriver
                 end
                 if nic[:ipv6_additionals]
                     nic_tmp <<
-                        "VCENTER_ADDITIONALS_IP6=\"#{nic[:ipv6_additionals]}\",\n"
+                        'VCENTER_ADDITIONALS_IP6'\
+                        "=\"#{nic[:ipv6_additionals]}\",\n"
                 end
             end
 
@@ -578,24 +702,33 @@ module VCenterDriver
             nic_tmp << "]\n"
 
             if vm?
-                nic_tmp << nic_alias_from_nic(one_vn.id, nic, nic_index, one_vn, vm_object)
+                nic_tmp << nic_alias_from_nic(one_vn.id, nic, nic_index,
+                                              one_vn, vm_object)
             end
 
             nic_tmp
         end
 
-        def nic_from_network_found(network_found, vm_object, nic, ar_ids, nic_index)
+        def nic_from_network_found(
+            network_found,
+            vm_object,
+            nic,
+            _ar_ids,
+            nic_index
+        )
             nic_tmp = "NIC=[\n"
             nic_tmp << "NETWORK_ID=\"#{network_found['ID']}\",\n"
             nic_tmp << "NAME =\"NIC#{nic_index}\",\n"
 
             if vm?
-                ipv4, ipv6 = find_ips_in_network(network_found, vm_object, nic, false, true)
+                ipv4, ipv6 = find_ips_in_network(network_found, vm_object,
+                                                 nic, false, true)
                 if ipv4.empty? && ipv6.empty?
                     ar_tmp = create_ar(nic)
                     network_found.add_ar(ar_tmp)
                 end
-                ipv4, ipv6 = find_ips_in_network(network_found, vm_object, nic, true)
+                ipv4, ipv6 = find_ips_in_network(network_found, vm_object,
+                                                 nic, true)
                 network_found.info
 
                 # This is the existing nic info
@@ -606,7 +739,8 @@ module VCenterDriver
                 nic_tmp << "IP6=\"#{ipv6}\"," unless ipv6.empty?
                 if nic[:ipv4_additionals]
                     nic_tmp <<
-                        "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n"
+                        'VCENTER_ADDITIONALS_IP4'\
+                        "=\"#{nic[:ipv4_additionals]}\",\n"
                 end
                 if nic[:ipv6]
                     nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n"
@@ -622,7 +756,8 @@ module VCenterDriver
 
                 if nic[:ipv6_additionals]
                     nic_tmp <<
-                        "VCENTER_ADDITIONALS_IP6=\"#{nic[:ipv6_additionals]}\",\n"
+                        'VCENTER_ADDITIONALS_IP6'\
+                        "=\"#{nic[:ipv6_additionals]}\",\n"
                 end
             end
 
@@ -630,30 +765,38 @@ module VCenterDriver
             nic_tmp << "]\n"
 
             if vm?
-                nic_tmp << nic_alias_from_nic(network_found['ID'], nic, nic_index, network_found, vm_object)
+                nic_tmp <<
+                    nic_alias_from_nic(
+                        network_found['ID'],
+                        nic,
+                        nic_index,
+                        network_found,
+                        vm_object
+                    )
             end
 
             nic_tmp
         end
 
-
         # Creates an OpenNebula Virtual Network as part of the VM Template
         # import process. This only need to  happen if no VNET in OpenNebula
         # is present that refers to the network where the NIC of the VM Template
         # is hooked to.
         def create_network_for_import(
-            nic,
-            ccr_ref,
-            ccr_name,
-            vc_uuid,
-            vcenter_instance_name,
-            dc_name,
-            template_ref,
-            dc_ref,
-            vm_id,
-            hpool,
-            vi_client
+            opts
         )
+            nic = opts[:nic]
+            ccr_ref = opts[:ccr_ref]
+            ccr_name = opts[:ccr_name]
+            vc_uuid = opts[:vc_uuid]
+            vcenter_instance_name = opts[:vcenter_instance_name]
+            dc_name = opts[:dc_name]
+            template_ref = opts[:template_ref]
+            dc_ref = opts[:dc_ref]
+            vm_id = opts[:vm_id]
+            hpool = opts[:hpool]
+            vi_client = opts[:vi_client]
+
             config = {}
             config[:refs] = nic[:refs]
 
@@ -677,10 +820,10 @@ module VCenterDriver
             end
 
             net = VCenterDriver::Network
-                      .new_from_ref(
-                          nic[:net_ref],
-                          vi_client
-                      )
+                  .new_from_ref(
+                      nic[:net_ref],
+                      vi_client
+                  )
             if net
                 vid = VCenterDriver::Network.retrieve_vlanid(net.item)
             end
@@ -689,9 +832,9 @@ module VCenterDriver
             when VCenterDriver::Network::NETWORK_TYPE_DPG
                 config[:sw_name] =
                     nic[:network]
-                        .config
-                        .distributedVirtualSwitch
-                        .name
+                    .config
+                    .distributedVirtualSwitch
+                    .name
                 # For DistributedVirtualPortgroups
                 # there is networks and uplinks
                 config[:uplink] = false
@@ -699,9 +842,9 @@ module VCenterDriver
             when VCenterDriver::Network::NETWORK_TYPE_NSXV
                 config[:sw_name] =
                     nic[:network]
-                        .config
-                        .distributedVirtualSwitch
-                        .name
+                    .config
+                    .distributedVirtualSwitch
+                    .name
                 # For NSX-V ( is the same as
                 # DistributedVirtualPortgroups )
                 # there is networks and uplinks
@@ -712,9 +855,9 @@ module VCenterDriver
                 begin
                     nsx_client =
                         NSXDriver::NSXClient
-                            .new_from_id(
-                                host_id
-                            )
+                        .new_from_id(
+                            host_id
+                        )
                 rescue StandardError
                     nsx_client = nil
                 end
@@ -722,10 +865,10 @@ module VCenterDriver
                 if !nsx_client.nil?
                     nsx_net =
                         NSXDriver::VirtualWire
-                            .new_from_name(
-                                nsx_client,
-                                nic[:net_name]
-                            )
+                        .new_from_name(
+                            nsx_client,
+                            nic[:net_name]
+                        )
 
                     config[:nsx_id] = nsx_net.ls_id
                     config[:nsx_vni] = nsx_net.ls_vni
@@ -739,13 +882,13 @@ module VCenterDriver
                 config[:uplink] = false
                 config[:sw_name] =
                     VCenterDriver::Network
-                        .virtual_switch(
-                            nic[:network]
-                        )
+                    .virtual_switch(
+                        nic[:network]
+                    )
                 # NSX-T PortGroups
             when VCenterDriver::Network::NETWORK_TYPE_NSXT
                 config[:sw_name] = \
-                                nic[:network].summary.opaqueNetworkType
+                    nic[:network].summary.opaqueNetworkType
                 # There is no uplinks for NSX-T networks,
                 # so all NSX-T networks
                 # are networks and no uplinks
@@ -762,10 +905,10 @@ module VCenterDriver
                 if !nsx_client.nil?
                     nsx_net =
                         NSXDriver::OpaqueNetwork
-                            .new_from_name(
-                                nsx_client,
-                                nic[:net_name]
-                            )
+                        .new_from_name(
+                            nsx_client,
+                            nic[:net_name]
+                        )
 
                     config[:nsx_id] = nsx_net.ls_id
                     config[:nsx_vni] = nsx_net.ls_vni
@@ -834,28 +977,32 @@ module VCenterDriver
             one_vn.info
 
             one_vn
-
-       end
+        end
 
         def import_vcenter_nics(
-            vi_client,
-            vc_uuid,
-            npool,
-            hpool,
-            vcenter_instance_name,
-            template_ref,
-            vm_object,
+            opts,
             vm_id = nil,
             dc_name = nil
         )
+
+            vi_client = opts[:vi_client]
+            vc_uuid = opts[:vc_uuid]
+            npool = opts[:npool]
+            hpool = opts[:hpool]
+            vcenter_instance_name = opts[:vcenter]
+            template_ref = opts[:template_moref]
+            vm_object = opts[:vm_object]
+
             nic_info = ''
             error = ''
             ar_ids = {}
             begin
-                lock # Lock import operation, to avoid concurrent creation of networks
+                # Lock import operation, to avoid
+                # concurrent creation of networks
+                lock
 
                 if !dc_name
-                    dc = get_dc
+                    dc = datacenter
                     dc_name = dc.item.name
                     dc_ref  = dc.item._ref
                 end
@@ -873,11 +1020,15 @@ module VCenterDriver
 
                 vc_nics.each do |nic|
                     # Check if the network already exists
-                    network_found = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
-                                                                        'TEMPLATE/VCENTER_NET_REF',
-                                                                        nic[:net_ref],
-                                                                        vc_uuid,
-                                                                        npool)
+                    network_found =
+                        VCenterDriver::VIHelper
+                        .find_by_ref(
+                            OpenNebula::VirtualNetworkPool,
+                            'TEMPLATE/VCENTER_NET_REF',
+                            nic[:net_ref],
+                            vc_uuid,
+                            npool
+                        )
                     # Network is already in OpenNebula
                     if network_found
                         nic_info << nic_from_network_found(network_found,
@@ -887,17 +1038,23 @@ module VCenterDriver
                                                            nic_index.to_s)
                     # Network not found
                     else
-                        one_vn = create_network_for_import(nic,
-                                                           ccr_ref,
-                                                           ccr_name,
-                                                           vc_uuid,
-                                                           vcenter_instance_name,
-                                                           dc_name,
-                                                           template_ref,
-                                                           dc_ref,
-                                                           vm_id,
-                                                           hpool,
-                                                           vi_client)
+                        opts = {
+                            :nic => nic,
+                            :ccr_ref => ccr_ref,
+                            :ccr_name => ccr_name,
+                            :vc_uuid => vc_uuid,
+                            :vcenter_instance_name => vcenter_instance_name,
+                            :dc_name => dc_name,
+                            :template_ref => template_ref,
+                            :dc_ref => dc_ref,
+                            :vm_id => vm_id,
+                            :hpool => hpool,
+                            :vi_client => vi_client
+                        }
+
+                        one_vn = create_network_for_import(
+                            opts
+                        )
 
                         allocated_networks << one_vn
 
@@ -1011,7 +1168,7 @@ module VCenterDriver
             disks
         end
 
-        def vcenter_nics_get
+        def vcenter_nics_list
             nics = []
             @item.config.hardware.device.each do |device|
                 nics << device if VCenterDriver::Network.nic?(device)
@@ -1120,30 +1277,29 @@ module VCenterDriver
 
                 nic = retrieve_from_device(device)
                 nic[:mac] = device.macAddress rescue nil
-                if vm?
-                    if online?
-                        inets_raw ||=
-                            @item['guest.net']
-                            .map
-                            .with_index {|x, _| [x.macAddress, x] }
-                        inets = parse_live.call(inets_raw) if inets.empty?
 
-                        if !inets[nic[:mac]].nil?
-                            ip_addresses =
-                                inets[nic[:mac]]
-                                .ipConfig
-                                .ipAddress rescue nil
-                        end
+                if vm? && online?
+                    inets_raw ||=
+                        @item['guest.net']
+                        .map
+                        .with_index {|x, _| [x.macAddress, x] }
+                    inets = parse_live.call(inets_raw) if inets.empty?
 
-                        if !ip_addresses.nil? && !ip_addresses.empty?
-                            nic[:ipv4],
-                            nic[:ipv4_additionals] = nil
-                            nic[:ipv6],
-                            nic[:ipv6_ula],
-                            nic[:ipv6_global],
-                            nic[:ipv6_additionals] = nil
-                            fill_nic(ip_addresses, nic)
-                        end
+                    if !inets[nic[:mac]].nil?
+                        ip_addresses =
+                            inets[nic[:mac]]
+                            .ipConfig
+                            .ipAddress rescue nil
+                    end
+
+                    if !ip_addresses.nil? && !ip_addresses.empty?
+                        nic[:ipv4],
+                        nic[:ipv4_additionals] = nil
+                        nic[:ipv6],
+                        nic[:ipv6_ula],
+                        nic[:ipv6_global],
+                        nic[:ipv6_additionals] = nil
+                        fill_nic(ip_addresses, nic)
                     end
                 end
                 nics << nic
@@ -1172,11 +1328,11 @@ module VCenterDriver
                         nic[:ipv6_ula] = ip
                     else
                         if nic[:ipv6]
-                           if nic[:ipv6_additionals]
-                               nic[:ipv6_additionals] += ',' + ip
-                           else
-                               nic[:ipv6_additionals] = ip
-                           end
+                            if nic[:ipv6_additionals]
+                                nic[:ipv6_additionals] += ',' + ip
+                            else
+                                nic[:ipv6_additionals] = ip
+                            end
                         else
                             nic[:ipv6] = ip
                         end
@@ -1186,17 +1342,21 @@ module VCenterDriver
         end
 
         def get_ipv6_prefix(ipv6, prefix_length)
-            ip_slice = ipv6.split(':').map {|elem| elem.hex }.map do |elem|
-                int, dec = elem.divmod(1)
-                bin = int.to_s(2).to_s
+            ip_slice =
+                ipv6
+                .split(':')
+                .map {|elem| elem.hex }
+                .map do |elem|
+                    int, dec = elem.divmod(1)
+                    bin = int.to_s(2).to_s
 
-                while dec > 0
-                    int, dec = (dec * 2).divmod(1)
-                    bin << int.to_s
-                end
+                    while dec > 0
+                        int, dec = (dec * 2).divmod(1)
+                        bin << int.to_s
+                    end
 
-                elem = bin
-            end.map {|elem| elem.rjust(16, '0') }
+                    bin
+                end.map {|elem| elem.rjust(16, '0') } # rubocop:disable Style/MultilineBlockChain
 
             ip_chain = ip_slice.join
             prefix = ip_chain[0, prefix_length]
@@ -1213,10 +1373,8 @@ module VCenterDriver
                 cont+=4
             end
 
-            slices
-                .map do |elem|
-                '%0x' % elem.to_i(2) # rubocop:disable Style/FormatString, Style/FormatStringToken
-            end.join.ljust(4, '0')
+            slices.map {|elem| format('%0x', elem.to_i(2)) }
+                  .join.ljust(4, '0')
         end
 
         #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
@@ -1258,7 +1416,7 @@ module VCenterDriver
         end
 
         # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
-        def rp_get
+        def resource_pool
             self['runtime.host.parent.resourcePool']
         end
 
@@ -1461,7 +1619,7 @@ module VCenterDriver
 
                 # Get datacenter info
                 if !dc_name
-                    dc = get_dc
+                    dc = datacenter
                     dc_name = dc.item.name
                 end
 
@@ -1503,8 +1661,11 @@ module VCenterDriver
                 folders = []
                 until item.instance_of? RbVmomi::VIM::Datacenter
                     item = item.parent
-                    if !item.instance_of? RbVmomi::VIM::Datacenter
-                        folders << item.name if item.name != 'vm'
+                    # Collect intermediate folder names while walking up,
+                    # skipping the Datacenter itself and the root 'vm' folder
+                    if !item.instance_of?(RbVmomi::VIM::Datacenter) &&
+                       item.name != 'vm'
+                        folders << item.name
+                    end
                     if item.nil?
                         raise 'Could not find the templates parent location'
@@ -1755,16 +1916,20 @@ module VCenterDriver
                     template_moref = selected[:vcenter_ref]
                 end
 
+                opts = {
+                    :vi_client => @vi_client,
+                    :vc_uuid => vc_uuid,
+                    :npool => npool,
+                    :hpool => hpool,
+                    :vcenter => vcenter,
+                    :template_moref => template_moref,
+                    :vm_object => nil
+                }
+
                 error, template_nics, _ar_ids, allocated_nets =
                     template
                     .import_vcenter_nics(
-                        @vi_client,
-                        vc_uuid,
-                        npool,
-                        hpool,
-                        vcenter,
-                        template_moref,
-                        nil,
+                        opts,
                         id,
                         dc
                     )
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vmm_importer.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vmm_importer.rb
index 0ea9719f8d..6433fa9abd 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/vmm_importer.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/vmm_importer.rb
@@ -63,15 +63,19 @@ module VCenterDriver
 
             template << template_disks
 
+            opts = {
+                :vi_client => @vi_client,
+                :vc_uuid => vc_uuid,
+                :npool => npool,
+                :hpool => hpool,
+                :vcenter => vc_name,
+                :template_moref => vm_ref,
+                :vm_object => vc_vm
+            }
+
             # Create images or get nics information for template
-            error, template_nics, ar_ids = vc_vm
-                                           .import_vcenter_nics(@vi_client,
-                                                                vc_uuid,
-                                                                npool,
-                                                                hpool,
-                                                                vc_name,
-                                                                vm_ref,
-                                                                vc_vm)
+            error, template_nics, ar_ids =
+                vc_vm.import_vcenter_nics(opts)
             opts = { :uuid => vc_uuid, :npool => npool, :error => error }
             Raction.delete_ars(ar_ids, opts) unless error.empty?
 
@@ -79,7 +83,7 @@ module VCenterDriver
             template << "VCENTER_ESX_HOST = #{vc_vm['runtime.host.name']}\n"
 
             # Get DS_ID for the deployment, the wild VM needs a System DS
-            dc_ref = vc_vm.get_dc.item._ref
+            dc_ref = vc_vm.datacenter.item._ref
             ds_ref = template.match(/^VCENTER_DS_REF *= *"(.*)" *$/)[1]
 
             ds_one = dpool.select do |e|

From aa839b75a489bc6f313b63cc729d76f68c97fd1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20Gonz=C3=A1lez?= <cgonzalez@opennebula.io>
Date: Thu, 14 Jan 2021 15:14:51 +0100
Subject: [PATCH 15/18] M #: fix typo in GOCA VM states (#643)

---
 src/oca/go/src/goca/schemas/vm/vm_state.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/oca/go/src/goca/schemas/vm/vm_state.go b/src/oca/go/src/goca/schemas/vm/vm_state.go
index c569112aec..bc88ed33e9 100644
--- a/src/oca/go/src/goca/schemas/vm/vm_state.go
+++ b/src/oca/go/src/goca/schemas/vm/vm_state.go
@@ -444,7 +444,7 @@ func (s LCMState) String() string {
 		return "DISK_RESIZE_UNDEPLOYED"
 	case HotplugNicPoweroff:
 		return "HOTPLUG_NIC_POWEROFF"
-	case HotplugrResize:
+	case HotplugResize:
 		return "HOTPLUG_RESIZE"
 	case HotplugSaveasUndeployed:
 		return "HOTPLUG_SAVEAS_UNDEPLOYED"

From 06c417986860785707757e3fc262613695382efb Mon Sep 17 00:00:00 2001
From: Jan Orel <jorel@opennebula.systems>
Date: Thu, 14 Jan 2021 15:26:00 +0100
Subject: [PATCH 16/18] M #-: Add sudoers-tmp workaround role to provision
 (#642)

This is temporary; once the beta is released, this task will be removed.
---
 share/oneprovision/ansible/aws.yml                    |  1 +
 share/oneprovision/ansible/packet.yml                 |  1 +
 .../ansible/roles/sudoers-tmp/files/centos-opennebula |  1 +
 .../ansible/roles/sudoers-tmp/files/debian-opennebula |  1 +
 .../ansible/roles/sudoers-tmp/tasks/main.yml          | 11 +++++++++++
 5 files changed, 15 insertions(+)
 create mode 120000 share/oneprovision/ansible/roles/sudoers-tmp/files/centos-opennebula
 create mode 120000 share/oneprovision/ansible/roles/sudoers-tmp/files/debian-opennebula
 create mode 100644 share/oneprovision/ansible/roles/sudoers-tmp/tasks/main.yml

diff --git a/share/oneprovision/ansible/aws.yml b/share/oneprovision/ansible/aws.yml
index 71b2049ae3..b94ec2d9a7 100644
--- a/share/oneprovision/ansible/aws.yml
+++ b/share/oneprovision/ansible/aws.yml
@@ -23,3 +23,4 @@
       frr_iface: 'eth0'
       # Use /16 for the internal management network address
       frr_prefix_length: 16
+    - sudoers-tmp
diff --git a/share/oneprovision/ansible/packet.yml b/share/oneprovision/ansible/packet.yml
index a10d276e27..d6d70447d6 100644
--- a/share/oneprovision/ansible/packet.yml
+++ b/share/oneprovision/ansible/packet.yml
@@ -24,3 +24,4 @@
       frr_iface: 'bond0_0'
       # Use /25 for the internal management network address
       frr_prefix_length: 25
+    - sudoers-tmp
diff --git a/share/oneprovision/ansible/roles/sudoers-tmp/files/centos-opennebula b/share/oneprovision/ansible/roles/sudoers-tmp/files/centos-opennebula
new file mode 120000
index 0000000000..62a225ce3b
--- /dev/null
+++ b/share/oneprovision/ansible/roles/sudoers-tmp/files/centos-opennebula
@@ -0,0 +1 @@
+../../../../../pkgs/sudoers/centos/opennebula
\ No newline at end of file
diff --git a/share/oneprovision/ansible/roles/sudoers-tmp/files/debian-opennebula b/share/oneprovision/ansible/roles/sudoers-tmp/files/debian-opennebula
new file mode 120000
index 0000000000..fb0612ed8c
--- /dev/null
+++ b/share/oneprovision/ansible/roles/sudoers-tmp/files/debian-opennebula
@@ -0,0 +1 @@
+../../../../../pkgs/sudoers/debian/opennebula
\ No newline at end of file
diff --git a/share/oneprovision/ansible/roles/sudoers-tmp/tasks/main.yml b/share/oneprovision/ansible/roles/sudoers-tmp/tasks/main.yml
new file mode 100644
index 0000000000..65691fb1d3
--- /dev/null
+++ b/share/oneprovision/ansible/roles/sudoers-tmp/tasks/main.yml
@@ -0,0 +1,11 @@
+- name: Overwrite opennebula sudoers (debian)
+  copy:
+    src: debian-opennebula
+    dest: /etc/sudoers.d/opennebula
+  when: ansible_os_family == "Debian"
+
+- name: Overwrite opennebula sudoers (redhat)
+  copy:
+    src: centos-opennebula
+    dest: /etc/sudoers.d/opennebula
+  when: ansible_os_family == "RedHat"

From 4d752c5d06e6a38b9cd597bd7d85b2a420ed3e2f Mon Sep 17 00:00:00 2001
From: Alejandro Huertas Herrero <ahuertas@opennebula.systems>
Date: Thu, 14 Jan 2021 15:27:36 +0100
Subject: [PATCH 17/18] M #-: add JSON and XML output to provision log (#640)

---
 src/cli/one_helper/oneprovision_helper.rb      |  2 +-
 src/cli/oneprovision                           |  6 ++++--
 src/oneprovision/lib/provision/oneprovision.rb | 14 +++++++++++++-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/src/cli/one_helper/oneprovision_helper.rb b/src/cli/one_helper/oneprovision_helper.rb
index 840a2929ac..982c6eeec8 100644
--- a/src/cli/one_helper/oneprovision_helper.rb
+++ b/src/cli/one_helper/oneprovision_helper.rb
@@ -246,7 +246,7 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
                       WAIT_READY,
                       WAIT_TIMEOUT,
                       PROVIDER,
-                      USER_INPUTS]
+                      USER_INPUTS] + [OpenNebulaHelper::FORMAT]
 
     ONE_OPTIONS = CommandParser::OPTIONS +
                   CLIHelper::OPTIONS +
diff --git a/src/cli/oneprovision b/src/cli/oneprovision
index 99d15cfeb1..d82d9fe089 100755
--- a/src/cli/oneprovision
+++ b/src/cli/oneprovision
@@ -189,7 +189,8 @@ CommandParser::CmdParser.new(ARGV) do
             provision_configure_desc,
             :provisionid,
             :options => [OneProvisionHelper::MODES,
-                         OneProvisionHelper::FORCE] do
+                         OneProvisionHelper::FORCE] +
+                            [OpenNebulaHelper::FORMAT] do
         helper.parse_options(options)
 
         rc = helper.configure(args[0], options.key?(:force))
@@ -214,7 +215,8 @@ CommandParser::CmdParser.new(ARGV) do
             :options => [OneProvisionHelper::MODES,
                          OneProvisionHelper::THREADS,
                          OneProvisionHelper::CLEANUP,
-                         OneProvisionHelper::CLEANUP_TIMEOUT] do
+                         OneProvisionHelper::CLEANUP_TIMEOUT] +
+                            [OpenNebulaHelper::FORMAT] do
         helper.parse_options(options)
 
         if options[:cleanup_timeout].nil?
diff --git a/src/oneprovision/lib/provision/oneprovision.rb b/src/oneprovision/lib/provision/oneprovision.rb
index 541dc0c463..6b1bc0be41 100644
--- a/src/oneprovision/lib/provision/oneprovision.rb
+++ b/src/oneprovision/lib/provision/oneprovision.rb
@@ -22,6 +22,7 @@ require 'provision/provision_pool'
 require 'provision/resources'
 require 'provision/utils'
 
+require 'base64'
 require 'logger'
 require 'singleton'
 
@@ -46,7 +47,18 @@ module OneProvision
             format = '%Y-%m-%d %H:%M:%S'
 
             instance.logger.formatter = proc do |severity, datetime, _p, msg|
-                "#{datetime.strftime(format)} #{severity.ljust(5)} : #{msg}\n"
+                if options[:json]
+                    "{ \"timestamp\": \"#{datetime}\", " \
+                    " \"severity\": \"#{severity}\", " \
+                    " \"message\": \"#{Base64.strict_encode64(msg)}\"}\n"
+                elsif options[:xml]
+                    "<TIMESTAMP>#{datetime}</TIMESTAMP>" \
+                    "<SEVERITY>#{severity}</SEVERITY>" \
+                    "<MESSAGE>#{Base64.strict_encode64(msg)}</MESSAGE>\n"
+                else
+                    "#{datetime.strftime(format)} #{severity.ljust(5)} " \
+                    ": #{msg}\n"
+                end
             end
 
             if options.key? :debug

From 000cb0a8418ebbfceabab3bd2e4ad697a95dbc77 Mon Sep 17 00:00:00 2001
From: "Carlos J. Herrera" <cherrera@opennebula.io>
Date: Thu, 14 Jan 2021 10:08:19 -0500
Subject: [PATCH 18/18] F #5222: Use sub folders in VCENTER_VM_FOLDER (#632)

---
 .../lib/vcenter_driver/virtual_machine.rb     | 26 ++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
index f7fe3b736e..801c8cea89 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
@@ -515,6 +515,20 @@ module VCenterDriver
         # Create and reconfigure VM related methods
         ########################################################################
 
+        # This function returns a folder with the given name if it exists,
+        # or creates it if it does not exist
+        def find_or_create_folder(folder_root, name)
+            folder_root.childEntity.each do |child|
+                if child.instance_of? RbVmomi::VIM::Folder
+                    if child.name == name
+                        return child
+                    end
+                end
+            end
+
+            folder_root.CreateFolder(:name => name)
+        end
+
         # This function creates a new VM from the
         # driver_action XML and returns the
         # VMware ref
@@ -563,9 +577,15 @@ module VCenterDriver
 
                 if vcenter_vm_folder_object.nil?
                     begin
-                        dc.item.vmFolder.CreateFolder(
-                            :name => vcenter_vm_folder
-                        )
+                        vcenter_vm_folder_list = vcenter_vm_folder.split("/")
+                        folder_root = dc.item.vmFolder
+
+                        vcenter_vm_folder_list.each do |folder_name|
+                            folder_root = find_or_create_folder(
+                                folder_root,
+                                folder_name
+                            )
+                        end
                     rescue StandardError => e
                         error_message = e.message
 			            if VCenterDriver::CONFIG[:debug_information]