diff --git a/src/sunstone/routes/vcenter.rb b/src/sunstone/routes/vcenter.rb
index 7fc9006a12..cf5c461bb3 100644
--- a/src/sunstone/routes/vcenter.rb
+++ b/src/sunstone/routes/vcenter.rb
@@ -88,7 +88,7 @@ get '/vcenter' do
             error 404, error.to_json
         end
 
-        rs = dc_folder.get_unimported_hosts(hpool)
+        rs = dc_folder.get_unimported_hosts(hpool, vcenter_client.vim.host)
         [200, rs.to_json]
     rescue Exception => e
         logger.error("[vCenter] " + e.message)
@@ -131,18 +131,86 @@ get '/vcenter/template/:vcenter_ref' do
     begin
         t = {}
         t[:one] = ""
+        template_copy_ref = nil
+        template = nil
+        append = true
+        lc_error = nil
 
-        template = VCenterDriver::VirtualMachine.new_from_ref(params[:vcenter_ref], vcenter_client)
+        ref = params[:vcenter_ref]
 
+        if !ref || ref.empty?
+            msg = "No template ref specified"
+            logger.error("[vCenter] " + msg)
+            error = Error.new(msg)
+            error 404, error.to_json
+        end
+
+        template = VCenterDriver::Template.new_from_ref(ref, vcenter_client)
         vc_uuid = vcenter_client.vim.serviceContent.about.instanceUuid
         dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
         ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
         npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
 
+        # POST params
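+        # Optional JSON body: use_linked_clones (boolean), create_copy (boolean)
+        # and template_name (string, name for the copy). create_copy and
+        # template_name are only meaningful when use_linked_clones is set.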
+        if @request_body && !@request_body.empty?
+            body_hash = JSON.parse(@request_body)
+            use_linked_clones = body_hash['use_linked_clones'] || false
+            create_copy = body_hash['create_copy'] || false
+            template_name = body_hash['template_name'] || ""
+
+            if !use_linked_clones && (create_copy || !template_name.empty?)
+                msg = "Should not set create template copy or template copy name if not using linked clones"
+                logger.error("[vCenter] " + msg)
+                error = Error.new(msg)
+                error 403, error.to_json
+            end
+
+            if use_linked_clones && !create_copy && !template_name.empty?
+                msg = "Should not set template copy name if create template copy hasn't been selected"
+                logger.error("[vCenter] " + msg)
+                error = Error.new(msg)
+                error 403, error.to_json
+            end
+
+            if create_copy
+
+                lc_error, template_copy_ref = template.create_template_copy(template_name)
+
+                if template_copy_ref
+
+                    template = VCenterDriver::Template.new_from_ref(template_copy_ref, vcenter_client)
+
+                    one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vcenter_client, vcenter_client.vim.host)
+
+                    if one_template
+
+                        lc_error, use_lc = template.create_delta_disks
+                        if !lc_error
+                            one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
+                            t = one_template
+                            append = false # t[:one] replaces the current template
+                        end
+                    else
+                        lc_error = "Could not obtain the info from the template's copy"
+                        template.delete_template if template_copy_ref
+                    end
+                end
+
+            else
+                lc_error, use_lc = template.create_delta_disks
+                if !lc_error
+                    append = true
+                    t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
+                end
+            end
+        end
+
         # Create images or get disks information for template
         error, template_disks = template.import_vcenter_disks(vc_uuid, dpool, ipool)
 
         if !error.empty?
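+            # Roll back on failure: drop the template copy (if one was made) so
+            # vCenter is left unchanged, and do not append partial template text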
+            append = false
+            template.delete_template if template_copy_ref
             msg = error
             logger.error("[vCenter] " + msg)
             error = Error.new(msg)
@@ -155,6 +223,8 @@ get '/vcenter/template/:vcenter_ref' do
         error, template_nics = template.import_vcenter_nics(vc_uuid, npool)
 
         if !error.empty?
+            append = false
+            template.delete_template if template_copy_ref
             msg = error
             logger.error("[vCenter] " + msg)
             error = Error.new(msg)
@@ -163,8 +233,12 @@ get '/vcenter/template/:vcenter_ref' do
 
         t[:one] << template_nics
 
+        t[:lc_error] = lc_error
+        t[:append] = append
+
         [200, t.to_json]
     rescue Exception => e
+        template.delete_template if template_copy_ref
         logger.error("[vCenter] " + e.message)
         error = Error.new(e.message)
         error 403, error.to_json
@@ -184,7 +258,7 @@ get '/vcenter/networks' do
             error 404, error.to_json
         end
 
-        networks = dc_folder.get_unimported_networks(npool)
+        networks = dc_folder.get_unimported_networks(npool, vcenter_client.vim.host)
 
         if networks.nil?
             msg = "No datacenter found"
@@ -231,16 +305,27 @@ get '/vcenter/datastores' do
     begin
         dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
 
-        hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
+        dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
 
-        if hpool.respond_to?(:message)
-            msg = "Could not get OpenNebula DatastorePool: #{hpool.message}"
+        if dpool.respond_to?(:message)
+            msg = "Could not get OpenNebula DatastorePool: #{dpool.message}"
             logger.error("[vCenter] " + msg)
             error = Error.new(msg)
             error 404, error.to_json
         end
 
-        datastores = dc_folder.get_unimported_datastores(hpool)
+        hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
+
+        if hpool.respond_to?(:message)
+            msg = "Could not get OpenNebula HostPool: #{hpool.message}"
+            logger.error("[vCenter] " + msg)
+            error = Error.new(msg)
+            error 404, error.to_json
+        end
+
+        datastores = dc_folder.get_unimported_datastores(dpool, vcenter_client.vim.host, hpool)
         if datastores.nil?
             msg = "No datacenter found"
             logger.error("[vCenter] " + msg)
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb b/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb
index 22314ce46c..af9177af45 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb
@@ -1,5 +1,6 @@
+require 'set'
+
 module VCenterDriver
-
 class DatacenterFolder
     attr_accessor :items
 
@@ -38,57 +38,11 @@ class DatacenterFolder
         @vi_client.vim.serviceContent.about.instanceUuid
     end
 
-    def get_vcenter_instance_name
-        @vi_client.vim.serviceContent.setting.setting.select{|set| set.key == 'VirtualCenter.InstanceName'}.first.value rescue nil
-    end
-
     def get_vcenter_api_version
         @vi_client.vim.serviceContent.about.apiVersion
     end
 
-    def get_clusters
-
-        clusters = {}
-
-        vcenter_uuid = get_vcenter_instance_uuid
-
-        pool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
-        if pool.respond_to?(:message)
-            raise "Could not get OpenNebula Pool: #{pool.message}"
-        end
-
-        fetch! if @items.empty? #Get datacenters
-
-        # Add datacenter to hash and store in an array all clusters
-        @items.values.each do |dc|
-            dc_name = dc.item.name
-            clusters[dc_name] = []
-
-            host_folder = dc.host_folder
-            host_folder.fetch_clusters!
-
-            host_folder.items.values.each do |ccr|
-                cluster = {}
-                cluster[:ref]  = ccr['_ref']
-                cluster[:name] = ccr['name']
-                attribute = "TEMPLATE/VCENTER_CCR_REF"
-                one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
-                                                               attribute,
-                                                               ccr['_ref'],
-                                                               vcenter_uuid,
-                                                               pool)
-
-                next if one_host.nil? #Cluster hasn't been imported'
-
-                cluster[:host_id] = one_host['ID']
-                clusters[dc_name] << cluster
-            end
-        end
-
-        clusters
-    end
-
-    def get_unimported_hosts(hpool)
+    def get_unimported_hosts(hpool, vcenter_instance_name)
         host_objects = {}
 
         vcenter_uuid = get_vcenter_instance_uuid
@@ -116,7 +70,8 @@ class DatacenterFolder
                 rpools = cluster.get_resource_pool_list.select {|rp| !rp[:name].empty?}
 
                 host_info = {}
-                host_info[:cluster_name]     = ccr['name'].tr(" ", "_")
+                cluster_name = "[#{vcenter_instance_name} - #{dc_name}] #{ccr['name'].tr(" ", "_")}"
+                host_info[:cluster_name]     = cluster_name
                 host_info[:cluster_ref]      = ccr['_ref']
                 host_info[:vcenter_uuid]     = vcenter_uuid
                 host_info[:vcenter_version]  = vcenter_version
@@ -129,19 +84,41 @@ class DatacenterFolder
         return host_objects
     end
 
-    def get_unimported_datastores(hpool)
+    def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
+
         ds_objects = {}
 
         vcenter_uuid = get_vcenter_instance_uuid
 
-        vcenter_instance_name = get_vcenter_instance_name
-
         fetch! if @items.empty? #Get datacenters
 
-        one_clusters = get_clusters
+        one_clusters = {}
 
         @items.values.each do |dc|
             dc_name = dc.item.name
+
+            one_clusters[dc_name] = []
+
+            host_folder = dc.host_folder
+            host_folder.fetch_clusters!
+
+            host_folder.items.values.each do |ccr|
+                cluster = {}
+                cluster[:ref]  = ccr['_ref']
+                cluster[:name] = ccr['name']
+                attribute = "TEMPLATE/VCENTER_CCR_REF"
+                one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
+                                                               attribute,
+                                                               ccr['_ref'],
+                                                               vcenter_uuid,
+                                                               hpool)
+
+                if one_host
+                    cluster[:host_id] = one_host['ID']
+                    one_clusters[dc_name] << cluster
+                end
+            end
+
             next if one_clusters[dc_name].empty? #No clusters imported, continue
             ds_objects[dc_name] = []
 
@@ -155,20 +132,21 @@ class DatacenterFolder
                     clusters_in_ds = {}
 
                     hosts_in_ds.each do |host|
-                        if !clusters_in_ds[host.key.parent._ref.to_s]
-                            clusters_in_ds[host.key.parent._ref.to_s] = host.key.parent.name
+                        cluster_ref = host.key.parent._ref
+                        if !clusters_in_ds[cluster_ref]
+                            clusters_in_ds[cluster_ref] = host.key.parent.name
                         end
                     end
 
                     clusters_in_ds.each do |ccr_ref, ccr_name|
-                        already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", hpool)
+                        already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", dpool)
 
                         if !already_image_ds
                             object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "IMAGE_DS", vcenter_uuid, vcenter_instance_name, dc_name)
                             ds_objects[dc_name] << object if !object.nil?
                         end
 
-                        already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", hpool)
+                        already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
 
                         if !already_system_ds
                             object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid, vcenter_instance_name, dc_name)
@@ -184,14 +162,15 @@ class DatacenterFolder
                     ds_in_spod.each do |sp_ds|
                         hosts_in_ds = sp_ds.host
                         hosts_in_ds.each do |host|
-                            if !clusters_in_spod[host.key.parent._ref.to_s]
-                                clusters_in_spod[host.key.parent._ref.to_s] = host.key.parent.name
+                            cluster_ref = host.key.parent._ref
+                            if !clusters_in_spod[cluster_ref]
+                                clusters_in_spod[cluster_ref] = host.key.parent.name
                             end
                         end
                     end
 
                     clusters_in_spod.each do |ccr_ref, ccr_name|
-                        already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", hpool)
+                        already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
 
                         if !already_system_ds
                             object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid, vcenter_instance_name, dc_name)
@@ -201,7 +180,6 @@ class DatacenterFolder
                 end
             end
         end
-
         ds_objects
     end
 
@@ -209,77 +187,120 @@ class DatacenterFolder
         template_objects = {}
         vcenter_uuid = get_vcenter_instance_uuid
 
+        vcenter_instance_name = vi_client.vim.host
+
         fetch! if @items.empty? #Get datacenters
 
         @items.values.each do |dc|
-
+            rp_cache = {}
             dc_name = dc.item.name
             template_objects[dc_name] = []
 
-            #Get templates defined in a datacenter
-            vm_folder = dc.vm_folder
-            vm_folder.fetch_templates!
-            vm_folder.items.values.each do |template|
+
+            view = vi_client.vim.serviceContent.viewManager.CreateContainerView({
+                container: dc.item.vmFolder,
+                type:      ['VirtualMachine'],
+                recursive: true
+            })
+
+            pc = vi_client.vim.serviceContent.propertyCollector
+
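+            # A ContainerView plus a single PropertyCollector query retrieves
+            # config.template for every VM in the datacenter in one round trip,
+            # instead of fetching each VM object individually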
+            filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+                :objectSet => [
+                    :obj => view,
+                    :skip => true,
+                    :selectSet => [
+                    RbVmomi::VIM.TraversalSpec(
+                        :name => 'traverseEntities',
+                        :type => 'ContainerView',
+                        :path => 'view',
+                        :skip => false
+                    )
+                    ]
+                ],
+                :propSet => [
+                    { :type => 'VirtualMachine', :pathSet => ['config.template'] }
+                ]
+            )
+
+            result = pc.RetrieveProperties(:specSet => [filterSpec])
+
+            vms = {}
+            result.each do |r|
+                vms[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
+            end
+
+            templates = []
+            vms.each do |ref, value|
+                if value["config.template"]
+                    templates << VCenterDriver::Template.new_from_ref(ref, vi_client)
+                end
+            end
+
+            view.DestroyView # Destroy the view
+
+            templates.each do |template|
+
                 one_template = VCenterDriver::VIHelper.find_by_ref(OpenNebula::TemplatePool,
                                                                    "TEMPLATE/VCENTER_TEMPLATE_REF",
                                                                    template['_ref'],
                                                                    vcenter_uuid,
                                                                    tpool)
+
                 next if one_template #If the template has been already imported
 
-                template_ccr  = template['runtime.host.parent']
+                one_template = VCenterDriver::Template.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name, dc_name, rp_cache)
 
-                # Check if template has nics or disks to be imported later
-                has_nics_and_disks = false
+                template_objects[dc_name] << one_template if one_template
+            end
+        end
 
-                template["config.hardware.device"].each do |device|
-                    if VCenterDriver::Storage.is_disk_or_iso?(device)
-                        has_nics_and_disks = true
-                        break
-                    end
-
-                    if VCenterDriver::Network.is_nic?(device)
-                        has_nics_and_disks = true
-                        break
-                    end
-                end
-
-                #Get resource pools
-                rp_cache = {}
-                if !rp_cache[template_ccr.name.to_s]
-                    tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr._ref, vi_client)
-                    rp_list = tmp_cluster.get_resource_pool_list
-                    rp = ""
-                    if !rp_list.empty?
-                        rp_name_list = []
-                        rp_list.each do |rp_hash|
-                            rp_name_list << rp_hash[:name]
-                        end
-                        rp =  "O|list|Which resource pool you want this VM to run in? "
-                        rp << "|#{rp_name_list.join(",")}" #List of RP
-                        rp << "|#{rp_name_list.first}" #Default RP
-                    end
-                    rp_cache[template_ccr.name.to_s] = rp
-                end
-                rp = rp_cache[template_ccr.name.to_s]
-
-                object = template.to_one_template(template,
-                                                  has_nics_and_disks,
-                                                  rp,
-                                                  rp_list,
-                                                  vcenter_uuid)
-
-                template_objects[dc_name] << object if !object.nil?
-            end #template loop
-        end #datacenter loop
-        return template_objects
+        template_objects
     end
 
-    def get_unimported_networks(npool)
+    def get_unimported_networks(npool, vcenter_instance_name)
 
         network_objects = {}
         vcenter_uuid = get_vcenter_instance_uuid
 
+        pc = @vi_client.vim.serviceContent.propertyCollector
+
+        #Get all port groups and distributed port groups in vcenter instance
+        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
+                container: @vi_client.vim.rootFolder,
+                type:      ['Network','DistributedVirtualPortgroup'],
+                recursive: true
+        })
+
+        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+            :objectSet => [
+                :obj => view,
+                :skip => true,
+                :selectSet => [
+                RbVmomi::VIM.TraversalSpec(
+                    :name => 'traverseEntities',
+                    :type => 'ContainerView',
+                    :path => 'view',
+                    :skip => false
+                )
+                ]
+            ],
+            :propSet => [
+                { :type => 'Network', :pathSet => ['name'] },
+                { :type => 'DistributedVirtualPortgroup', :pathSet => ['name'] }
+            ]
+        )
+
+        result = pc.RetrieveProperties(:specSet => [filterSpec])
+
+        networks = {}
+        result.each do |r|
+            next unless r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) || r.obj.is_a?(RbVmomi::VIM::Network)
+            networks[r.obj._ref] = r.to_hash
+            networks[r.obj._ref][:network_type] = r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) ? "Distributed Port Group" : "Port Group"
+        end
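+        # networks is keyed by the managed object ref, so the per-datacenter
+        # cluster pass below can look up each network's name and type directly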
+
+        view.DestroyView # Destroy the view
+
         fetch! if @items.empty? #Get datacenters
 
         @items.values.each do |dc|
@@ -287,32 +308,66 @@ class DatacenterFolder
             dc_name = dc.item.name
             network_objects[dc_name] = []
 
-            #Get networks defined in a datacenter
-            network_folder = dc.network_folder
-            network_folder.fetch!
-            network_folder.items.values.each do |network|
+            view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
+                container: dc.item,
+                type:      ['ClusterComputeResource'],
+                recursive: true
+            })
 
-                next if network.instance_of? VCenterDriver::DistributedVirtualSwitch
+            filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+                :objectSet => [
+                    :obj => view,
+                    :skip => true,
+                    :selectSet => [
+                    RbVmomi::VIM.TraversalSpec(
+                        :name => 'traverseEntities',
+                        :type => 'ContainerView',
+                        :path => 'view',
+                        :skip => false
+                    )
+                    ]
+                ],
+                :propSet => [
+                    { :type => 'ClusterComputeResource', :pathSet => ['name','network'] }
+                ]
+            )
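+            # This second, datacenter-scoped query returns each cluster's name
+            # and network list, tying every network to the clusters that see it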
 
-                one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
-                                                                  "TEMPLATE/VCENTER_NET_REF",
-                                                                  network['_ref'],
-                                                                  vcenter_uuid,
-                                                                  npool)
-                next if one_network #If the network has been already imported
+            result = pc.RetrieveProperties(:specSet => [filterSpec])
 
-                network_name = network['name']
-                network_ref  = network['_ref']
+            clusters = {}
+            result.each do |r|
+                clusters[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::ClusterComputeResource)
+            end
+
+            view.DestroyView # Destroy the view
+
+            clusters.each do |ref, info|
+
+                network_obj = info['network']
+
+                network_obj.each do |n|
+                    network_ref  = n._ref
+                    network_name = networks[network_ref]['name']
+                    network_type = networks[network_ref][:network_type]
+
+                    one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
+                                                                    "TEMPLATE/VCENTER_NET_REF",
+                                                                    network_ref,
+                                                                    vcenter_uuid,
+                                                                    npool)
+                    next if one_network #If the network has been already imported
 
-                network.clusters.each do |ccr_ref, ccr_name|
                     one_vnet = VCenterDriver::Network.to_one_template(network_name,
-                                                                      network_ref,
-                                                                      network.network_type,
-                                                                      ccr_ref,
-                                                                      ccr_name,
-                                                                      vcenter_uuid)
+                                                                        network_ref,
+                                                                        network_type,
+                                                                        ref,
+                                                                        info['name'],
+                                                                        vcenter_uuid,
+                                                                        vcenter_instance_name,
+                                                                        dc_name)
                     network_objects[dc_name] << one_vnet
-                end #network clusters loop
+                end
+
             end # network loop
         end #datacenters loop
 
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb b/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb
index 537017fa87..cc353ff3e9 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb
@@ -160,19 +160,21 @@ class Storage
 
         return nil if one_cluster.nil?
 
+        name, capacity, free_space = @item.collect("name","summary.capacity","summary.freeSpace")
+
         ds_name = ""
 
         if type == "IMAGE_DS"
-            ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{self['name']} - #{ccr_name} (IMG)"
+            ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{name} - #{ccr_name.tr(" ", "_")} (IMG)"
         else
-            ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{self['name']} - #{ccr_name} (SYS)"
+            ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{name} - #{ccr_name.tr(" ", "_")} (SYS)"
             ds_name << " [StorDRS]" if self.class == VCenterDriver::StoragePod
         end
 
         one_tmp = {
             :name     => ds_name,
-            :total_mb => ((self['summary.capacity'].to_i / 1024) / 1024),
-            :free_mb  => ((self['summary.freeSpace'].to_i / 1024) / 1024),
+            :total_mb => ((capacity.to_i / 1024) / 1024),
+            :free_mb  => ((free_space.to_i / 1024) / 1024),
             :cluster  => ccr_name,
             :one  => to_one(ds_name, vcenter_uuid, ccr_ref, one_cluster[:host_id])
         }
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
index f4f5763bb6..e526b11e25 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb
@@ -27,12 +27,14 @@ end # class HostFolder
 
 class ClusterComputeResource
     attr_accessor :item
+    attr_accessor :rp_list
 
     include Memoize
 
     def initialize(item, vi_client=nil)
         @item = item
         @vi_client = vi_client
+        @rp_list = nil
     end
 
     def fetch_resource_pools(rp, rp_array = [])
@@ -53,51 +55,54 @@ class ClusterComputeResource
         @resource_pools
     end
 
-    def get_resource_pool_list(rp = nil, parent_prefix = "", rp_array = [])
-
+    def get_resource_pool_list(rp = @item.resourcePool, parent_prefix = "", rp_array = [])
         current_rp = ""
 
-        if rp.nil?
-            rp = @item.resourcePool
-        else
-            if !parent_prefix.empty?
-                current_rp << parent_prefix
-                current_rp << "/"
-            end
-            current_rp << rp.name
+        if !parent_prefix.empty?
+            current_rp << parent_prefix
+            current_rp << "/"
         end
 
-        if rp.resourcePool.size == 0
-            rp_info = {}
-            rp_info[:name] = current_rp
-            rp_info[:ref]  = rp._ref
-            rp_array << rp_info
-        else
-            rp.resourcePool.each do |child_rp|
-                get_resource_pool_list(child_rp, current_rp, rp_array)
-            end
-            rp_info = {}
-            rp_info[:name] = current_rp
-            rp_info[:ref]  = rp._ref
-            rp_array << rp_info if !current_rp.empty?
+        resource_pool, name = rp.collect("resourcePool","name")
+        current_rp << name if name != "Resources"
+
+        resource_pool.each do |child_rp|
+            get_resource_pool_list(child_rp, current_rp, rp_array)
         end
 
+        rp_info = {}
+        rp_info[:name] = current_rp
+        rp_info[:ref]  = rp._ref
+        rp_array << rp_info if !current_rp.empty?
+
         rp_array
     end
 
     def monitor
-        #Load the host systems
-        summary = @item.summary
+        total_cpu,
+        num_cpu_cores,
+        effective_cpu,
+        total_memory,
+        effective_mem,
+        num_hosts,
+        num_eff_hosts = @item.collect("summary.totalCpu",
+                                      "summary.numCpuCores",
+                                      "summary.effectiveCpu",
+                                      "summary.totalMemory",
+                                      "summary.effectiveMemory",
+                                      "summary.numHosts",
+                                      "summary.numEffectiveHosts"
+        )
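+        # RbVmomi's ManagedObject#collect fetches all of these property paths
+        # in a single PropertyCollector call instead of loading the whole
+        # cluster summary object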
 
-        mhz_core = summary.totalCpu.to_f / summary.numCpuCores.to_f
-        eff_core = summary.effectiveCpu.to_f / mhz_core
+        mhz_core = total_cpu.to_f / num_cpu_cores.to_f
+        eff_core = effective_cpu.to_f / mhz_core
 
         free_cpu  = sprintf('%.2f', eff_core * 100).to_f
-        total_cpu = summary.numCpuCores.to_f * 100
+        total_cpu = num_cpu_cores.to_f * 100
         used_cpu  = sprintf('%.2f', total_cpu - free_cpu).to_f
 
-        total_mem = summary.totalMemory.to_i / 1024
-        free_mem  = summary.effectiveMemory.to_i * 1024
+        total_mem = total_memory.to_i / 1024
+        free_mem  = effective_mem.to_i * 1024
 
         str_info = ""
 
@@ -106,8 +111,8 @@ class ClusterComputeResource
 
         # System
         str_info << "HYPERVISOR=vcenter\n"
-        str_info << "TOTALHOST=" << summary.numHosts.to_s << "\n"
-        str_info << "AVAILHOST=" << summary.numEffectiveHosts.to_s << "\n"
+        str_info << "TOTALHOST=" << num_hosts.to_s << "\n"
+        str_info << "AVAILHOST=" << num_eff_hosts.to_s << "\n"
 
         # CPU
         str_info << "CPUSPEED=" << mhz_core.to_s   << "\n"
@@ -118,37 +123,90 @@ class ClusterComputeResource
         # Memory
         str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
         str_info << "FREEMEMORY="  << free_mem.to_s << "\n"
-        str_info << "USEDMEMORY="  << (total_mem - free_mem).to_s
+        str_info << "USEDMEMORY="  << (total_mem - free_mem).to_s << "\n"
 
+        str_info << "VCENTER_LAST_PERF_POLL=" << Time.now.to_i.to_s << "\n"
 
-        str_info << monitor_resource_pools(@item.resourcePool, "", mhz_core)
+        str_info << monitor_resource_pools(mhz_core)
     end
 
-    def monitor_resource_pools(parent_rp, parent_prefix, mhz_core)
-        return "" if parent_rp.resourcePool.size == 0
+    def monitor_resource_pools(mhz_core)
+
+        @rp_list = get_resource_pool_list
+
+        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
+            container: @item, #View for RPs inside this cluster
+            type:      ['ResourcePool'],
+            recursive: true
+        })
+
+        pc = @vi_client.vim.serviceContent.propertyCollector
+
+        monitored_properties = [
+            "config.cpuAllocation.expandableReservation",
+            "config.cpuAllocation.limit",
+            "config.cpuAllocation.reservation",
+            "config.cpuAllocation.shares.level",
+            "config.cpuAllocation.shares.shares",
+            "config.memoryAllocation.expandableReservation",
+            "config.memoryAllocation.limit",
+            "config.memoryAllocation.reservation",
+            "config.memoryAllocation.shares.level",
+            "config.memoryAllocation.shares.shares"
+        ]
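+        # Dotted property paths make the PropertyCollector return just the
+        # nested allocation values, avoiding a full config fetch per pool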
+
+        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+            :objectSet => [
+                :obj => view,
+                :skip => true,
+                :selectSet => [
+                RbVmomi::VIM.TraversalSpec(
+                    :name => 'traverseEntities',
+                    :type => 'ContainerView',
+                    :path => 'view',
+                    :skip => false
+                )
+                ]
+            ],
+            :propSet => [
+                { :type => 'ResourcePool', :pathSet => monitored_properties }
+            ]
+        )
+
+        result = pc.RetrieveProperties(:specSet => [filterSpec])
+
+        rps = {}
+        result.each do |r|
+            hashed_properties = r.to_hash
+            if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
+                rps[r.obj._ref] = hashed_properties
+            end
+        end
+
+        return "" if rps.empty?
 
         rp_info = ""
 
-        parent_rp.resourcePool.each{|rp|
-            rpcpu     = rp.config.cpuAllocation
-            rpmem     = rp.config.memoryAllocation
+        rps.each{|ref, info|
+
             # CPU
-            cpu_expandable   = rpcpu.expandableReservation ? "YES" : "NO"
-            cpu_limit        = rpcpu.limit == "-1" ? "UNLIMITED" : rpcpu.limit
-            cpu_reservation  = rpcpu.reservation
-            cpu_num          = rpcpu.reservation.to_f / mhz_core
-            cpu_shares_level = rpcpu.shares.level
-            cpu_shares       = rpcpu.shares.shares
+            cpu_expandable   = info["config.cpuAllocation.expandableReservation"] ? "YES" : "NO"
+            cpu_limit        = info["config.cpuAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.cpuAllocation.limit"]
+            cpu_reservation  = info["config.cpuAllocation.reservation"]
+            cpu_num          = cpu_reservation.to_f / mhz_core
+            cpu_shares_level = info["config.cpuAllocation.shares.level"]
+            cpu_shares       = info["config.cpuAllocation.shares.shares"]
 
             # MEMORY
-            mem_expandable   = rpmem.expandableReservation ? "YES" : "NO"
-            mem_limit        = rpmem.limit == "-1" ? "UNLIMITED" : rpmem.limit
-            mem_reservation  = rpmem.reservation.to_f
-            mem_shares_level = rpmem.shares.level
-            mem_shares       = rpmem.shares.shares
+            mem_expandable   = info["config.memoryAllocation.expandableReservation"] ? "YES" : "NO"
+            mem_limit        = info["config.memoryAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.memoryAllocation.limit"]
+            mem_reservation  = info["config.memoryAllocation.reservation"].to_f
+            mem_shares_level = info["config.memoryAllocation.shares.level"]
+            mem_shares       = info["config.memoryAllocation.shares.shares"]
 
-            rp_name          = (parent_prefix.empty? ? "" : parent_prefix + "/")
-            rp_name         += rp.name
+            rp_name = (rp_list.find { |item| item[:ref] == ref } || {})[:name] || ""
+
+            rp_name = "Resources" if rp_name.empty?
 
             rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
             rp_info << "NAME=\"#{rp_name}\","
@@ -164,39 +222,80 @@ class ClusterComputeResource
             rp_info << "MEM_SHARES=#{mem_shares},"
             rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
             rp_info << "]"
-
-            if rp.resourcePool.size != 0
-               rp_info << monitor_resource_pools(rp, rp_name, mhz_core)
-            end
         }
 
+        view.DestroyView
+
         return rp_info
     end
 
     def monitor_host_systems
         host_info = ""
 
-        @item.host.each do |h|
-            next if h.runtime.connectionState != "connected"
+        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
+            container: @item, #View for Hosts inside this cluster
+            type:      ['HostSystem'],
+            recursive: true
+        })
 
-            summary = h.summary
-            hw      = summary.hardware
-            stats   = summary.quickStats
+        pc = @vi_client.vim.serviceContent.propertyCollector
 
-            total_cpu = hw.numCpuCores * 100
-            used_cpu  = (stats.overallCpuUsage.to_f / hw.cpuMhz.to_f) * 100
+        monitored_properties = [
+            "name",
+            "runtime.connectionState",
+            "summary.hardware.numCpuCores",
+            "summary.hardware.memorySize",
+            "summary.hardware.cpuModel",
+            "summary.hardware.cpuMhz",
+            "summary.quickStats.overallCpuUsage",
+            "summary.quickStats.overallMemoryUsage"
+        ]
+
+        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+            :objectSet => [
+                :obj => view,
+                :skip => true,
+                :selectSet => [
+                RbVmomi::VIM.TraversalSpec(
+                    :name => 'traverseEntities',
+                    :type => 'ContainerView',
+                    :path => 'view',
+                    :skip => false
+                )
+                ]
+            ],
+            :propSet => [
+                { :type => 'HostSystem', :pathSet => monitored_properties }
+            ]
+        )
+
+        result = pc.RetrieveProperties(:specSet => [filterSpec])
+
+        hosts = {}
+        result.each do |r|
+            hashed_properties = r.to_hash
+            if r.obj.is_a?(RbVmomi::VIM::HostSystem)
+                hosts[r.obj._ref] = hashed_properties
+            end
+        end
+
+        hosts.each do |ref, info|
+            next if info["runtime.connectionState"] != "connected"
+
+            total_cpu = info["summary.hardware.numCpuCores"] * 100
+            used_cpu  = (info["summary.quickStats.overallCpuUsage"].to_f / info["summary.hardware.cpuMhz"].to_f) * 100
             used_cpu  = sprintf('%.2f', used_cpu).to_f # Trim precission
             free_cpu  = total_cpu - used_cpu
 
-            total_memory = hw.memorySize/1024
-            used_memory  = stats.overallMemoryUsage*1024
+            total_memory = info["summary.hardware.memorySize"]/1024
+            used_memory  = info["summary.quickStats.overallMemoryUsage"]*1024
             free_memory  = total_memory - used_memory
 
             host_info << "\nHOST=["
             host_info << "STATE=on,"
-            host_info << "HOSTNAME=\""  << h.name.to_s       << "\","
-            host_info << "MODELNAME=\"" << hw.cpuModel.to_s  << "\","
-            host_info << "CPUSPEED="    << hw.cpuMhz.to_s    << ","
+            host_info << "HOSTNAME=\""  << info["name"].to_s       << "\","
+            host_info << "MODELNAME=\"" << info["summary.hardware.cpuModel"].to_s  << "\","
+            host_info << "CPUSPEED="    << info["summary.hardware.cpuMhz"].to_s    << ","
             host_info << "MAX_CPU="     << total_cpu.to_s    << ","
             host_info << "USED_CPU="    << used_cpu.to_s     << ","
             host_info << "FREE_CPU="    << free_cpu.to_s     << ","
@@ -206,88 +305,195 @@ class ClusterComputeResource
             host_info << "]"
         end
 
+        view.DestroyView # Destroy the view
+
         return host_info
     end
 
     def monitor_vms
-        str_info = ""
+
+        vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
+        cluster_name = self["name"]
+        cluster_ref = self["_ref"]
+
+        # Get info of the host where the VM/template is located
+        host_id = nil
+        one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
+                                                       "TEMPLATE/VCENTER_CCR_REF",
+                                                       cluster_ref,
+                                                       vc_uuid)
+        host_id = one_host["ID"] if one_host
+
+
+        # Extract CPU info for each esx host in cluster
+        esx_host_cpu = {}
+        @item.host.each do |esx_host|
+            esx_host_cpu[esx_host._ref] = esx_host.summary.hardware.cpuMhz.to_f
+        end
+
         @monitored_vms = Set.new
-        resource_pools.each do |rp|
-            str_info << monitor_vms_in_rp(rp)
-        end
-
-        return str_info
-    end
-
-    def monitor_vms_in_rp(rp)
         str_info = ""
 
-        host_pool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool)
+        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
+            container: @item, #View for VMs inside this cluster
+            type:      ['VirtualMachine'],
+            recursive: true
+        })
 
-        ccr_host = {}
-        host_pool.each do |host|
-            ccr = host['TEMPLATE/VCENTER_CCR_REF']
-            ccr_host[ccr] = host['ID'] if ccr
+        pc = @vi_client.vim.serviceContent.propertyCollector
+
+        monitored_properties = [
+            "name", #VM name
+            "config.template", #To filter out templates
+            "summary.runtime.powerState", #VM power state
+            "summary.quickStats.hostMemoryUsage", #Memory usage
+            "summary.quickStats.overallCpuUsage", #CPU used by VM
+            "runtime.host", #ESX host
+            "resourcePool", #RP
+            "guest.guestFullName",
+            "guest.net", #IP addresses as seen by guest tools,
+            "guest.guestState",
+            "guest.toolsVersion",
+            "guest.toolsRunningStatus",
+            "guest.toolsVersionStatus2", #IP addresses as seen by guest tools,
+            "config.extraConfig", #VM extraconfig info e.g opennebula.vm.running
+            "config.hardware.numCPU",
+            "config.hardware.memoryMB",
+            "config.annotation"
+        ]
+
+        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+            :objectSet => [
+                :obj => view,
+                :skip => true,
+                :selectSet => [
+                RbVmomi::VIM.TraversalSpec(
+                    :name => 'traverseEntities',
+                    :type => 'ContainerView',
+                    :path => 'view',
+                    :skip => false
+                )
+                ]
+            ],
+            :propSet => [
+                { :type => 'VirtualMachine', :pathSet => monitored_properties }
+            ]
+        )
+
+        result = pc.RetrieveProperties(:specSet => [filterSpec])
+
+        vms = {}
+        vm_objects = []
+        result.each do |r|
+            hashed_properties = r.to_hash
+            if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
+                #Only take care of VMs, not templates
+                if !hashed_properties["config.template"]
+                    vms[r.obj._ref] = hashed_properties
+                    vm_objects << r.obj
+                end
+            end
         end
 
-        rp.vm.each do |v|
+        pm = @vi_client.vim.serviceContent.perfManager
+
+        stats = []
+
+        max_samples = 9
+        refresh_rate = 20 #Real time stats takes samples every 20 seconds
+
+        last_mon_time = one_host ? one_host["TEMPLATE/VCENTER_LAST_PERF_POLL"] : nil
+
+        if last_mon_time
+            interval = (Time.now.to_i - last_mon_time.to_i)
+            interval = 3601 if interval < 0
+            samples = (interval / refresh_rate)
+            samples = 1 if samples == 0
+            max_samples = interval > 3600 ? 9 : samples
+        end
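+        # Real-time counters arrive as 20-second samples; request only as many
+        # samples as have elapsed since the last poll, capped at 9 (~3 minutes)
+        # when more than an hour has passed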
+
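+        # retrieve_stats is rbvmomi's PerformanceManager helper around QueryPerf;
+        # one call returns the network and virtual disk counters for every VM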
+        stats = pm.retrieve_stats(
+                vm_objects,
+                ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
+                'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
+                'virtualDisk.read','virtualDisk.write'],
+                {max_samples: max_samples}
+        )
+
+        @rp_list = get_resource_pool_list if !@rp_list
+
+        vms.each do |vm_ref,info|
             begin
-                vm = VirtualMachine.new(v, @vi_client)
+                vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, @vi_client)
+                info[:cluster_name] = cluster_name
+                info[:cluster_ref] = cluster_ref
+                info[:vc_uuid] = vc_uuid
+                info[:host_id] = host_id
+                info[:rp_list] = @rp_list
+
+                vm.vm_info = info
 
                 number = -1
 
                 # Check the running flag
-                running_flag = vm["config.extraConfig"].select do |val|
+                running_flag = info["config.extraConfig"].select do |val|
                     val[:key] == "opennebula.vm.running"
                 end
 
-                if running_flag.size > 0 and running_flag[0]
+                if !running_flag.empty? && running_flag.first
                     running_flag = running_flag[0][:value]
                 end
 
                 next if running_flag == "no"
 
                 # Extract vmid if possible
-                matches = vm["name"].match(/^one-(\d*)(-(.*))?$/)
+                matches = info["name"].match(/^one-(\d*)(-(.*))?$/)
                 number  = matches[1] if matches
 
                 # Extract vmid from ref and vcenter instance uuid if possible
-                vm_id = vm.get_vm_id
-
-                number = vm_id if vm_id
+                if number == -1
+                    one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
+                                                                "DEPLOY_ID",
+                                                                vm_ref,
+                                                                vc_uuid)
+                    number = one_vm["ID"] if one_vm
+                end
 
                 if number != -1
                     next if @monitored_vms.include? number
                     @monitored_vms << number
+                    vm.one_item if vm.get_vm_id
                 end
 
-                vm.monitor
+                vm.monitor(esx_host_cpu,stats)
 
-                next if !vm["config"]
+                vm_name = "#{info["name"]} - #{cluster_name}"
 
                 str_info << %Q{
                 VM = [
                     ID="#{number}",
-                    VM_NAME="#{vm["name"]} - #{vm["runtime.host.parent.name"]}",
-                    DEPLOY_ID="#{vm["_ref"]}",
+                    VM_NAME="#{vm_name}",
+                    DEPLOY_ID="#{vm_ref}",
                 }
 
                 if number == -1
-                    vm_template_64 = Base64.encode64(vm.to_one).gsub("\n","")
+                    vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
 
                     str_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\","
                 end
 
                 str_info << "POLL=\"#{vm.info.gsub('"', "\\\"")}\"]"
+
             rescue Exception => e
                 STDERR.puts e.inspect
                 STDERR.puts e.backtrace
             end
         end
 
-        return str_info.gsub(/^\s+/,"")
-    end
+        view.DestroyView # Destroy the view
 
+        return str_info
+    end
 
     def monitor_customizations
         customizations = self['_connection'].serviceContent.customizationSpecManager.info
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb b/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb
index 3828068e36..17f22d0078 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb
@@ -16,11 +16,7 @@ def self.import_clusters(con_ops, options)
 
         dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
 
-        # Get vcenter intance uuid as moref is unique for each vcenter
-        vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
-
-        # Get vcenter API version
-        vc_version = vi_client.vim.serviceContent.about.apiVersion
+        vcenter_instance_name = vi_client.vim.host
 
         # OpenNebula's ClusterPool
         cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
@@ -29,8 +25,6 @@ def self.import_clusters(con_ops, options)
             raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
         end
 
-        cpool.info
-
         cluster_list = {}
         cpool.each do |c|
             cluster_list[c["ID"]] = c["NAME"]
@@ -43,7 +37,7 @@ def self.import_clusters(con_ops, options)
             raise "Could not get OpenNebula HostPool: #{hpool.message}"
         end
 
-        rs = dc_folder.get_unimported_hosts(hpool)
+        rs = dc_folder.get_unimported_hosts(hpool, vcenter_instance_name)
 
         STDOUT.print "done!\n\n"
 
@@ -175,6 +169,9 @@ def self.import_templates(con_ops, options)
             end
 
             tmps.each{ |t|
+                template = nil
+                template_copy_ref = nil
+                template_xml = nil
 
                 if !use_defaults
                     STDOUT.print "\n  * VM Template found:\n"\
@@ -186,9 +183,106 @@ def self.import_templates(con_ops, options)
                     next if STDIN.gets.strip.downcase != 'y'
                 end
 
+                # Linked Clones
+                if !use_defaults
+
+                    template = VCenterDriver::Template.new_from_ref(t[:vcenter_ref], vi_client)
+
+                    STDOUT.print "\n    For faster deployment operations"\
+                                 " and lower disk usage, OpenNebula"\
+                                 " can create new VMs as linked clones."\
+                                 "\n    Would you like to use Linked Clones with VMs based on this template (y/[n])? "
+
+                    if STDIN.gets.strip.downcase == 'y'
+
+                        STDOUT.print "\n    Linked clones requires that delta"\
+                                     " disks must be created for each disk in the template."\
+                                     " This operation may change the template contents."\
+                                     " \n    Do you want OpenNebula to create a copy of the template,"\
+                                     " so the original template remains untouched ([y]/n)? "
+
+                        template = t[:template]
+                        if STDIN.gets.strip.downcase != 'n'
+
+                            STDOUT.print "\n    The new template will be named"\
+                                         " adding a one- prefix to the name"\
+                                         " of the original template. \n"\
+                                         "    If you prefer a different name"\
+                                         " please specify or press Enter"\
+                                         " to use defaults: "
+
+                            template_name = STDIN.gets.strip
+
+                            STDOUT.print "\n    WARNING!!! The cloning operation can take some time"\
+                                         " depending on the size of disks. Please wait...\n"
+
+                            error, template_copy_ref = template.create_template_copy(template_name)
+
+                            if template_copy_ref
+
+                                template = VCenterDriver::Template.new_from_ref(template_copy_ref, vi_client)
+
+                                one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vi_client, options[:vcenter], dc)
+
+                                if one_template
+                                    #Now create delta disks
+                                    STDOUT.print "\n    Delta disks are being created, please be patient..."
+
+                                    lc_error, use_lc = template.create_delta_disks
+                                    if lc_error
+                                        STDOUT.print "\n    ERROR. Something went wrong while creating delta disks on the template: #{lc_error}.\n"\
+                                                     "\n    Linked Clones will not be used with this template.\n"
+                                    else
+                                        one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
+                                        t = one_template
+                                    end
+                                else
+                                    STDOUT.print "\n    ERROR. Something went wrong obtaining the info from the template's copy.\n"\
+                                                 "\n    Linked Clones will not be used with this template.\n"
+                                    template.delete_template if template_copy_ref
+                                end
+
+                            else
+                                STDOUT.print "\n    ERROR. #{error}\n"
+                            end
+
+                        else
+                            # No copy requested: create delta disks directly on
+                            # the existing template so it can back linked clones
+                            STDOUT.print "\n    Delta disks are being created, please be patient..."
+
+                            lc_error, use_lc = template.create_delta_disks
+                            if lc_error
+                                STDOUT.print "\n    ERROR. Something went wrong while creating delta disks on the template: #{lc_error}.\n"\
+                                             "\n    Linked Clones will not be used with this template.\n"
+                            end
+                            t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
+                        end
+                    end
+                end
+
+                vcenter_vm_folder = ""
+                if !use_defaults
+                    STDOUT.print "\n\n    Do you want to specify a folder where"\
+                                    " the deployed VMs based on this template will appear"\
+                                    " in vSphere's VM and Templates section?"\
+                                    "\n    If no path is set, VMs will be placed in the same"\
+                                    " location where the template lives."\
+                                    "\n    Please specify a path using slashes to separate folders"\
+                                    " e.g /Management/VMs or press Enter to use defaults: "\
+
+                    vcenter_vm_folder = STDIN.gets.strip
+                    t[:one] << "VCENTER_VM_FOLDER=\"#{vcenter_vm_folder}\"\n" if !vcenter_vm_folder.empty?
+                end
+
                 ## Add existing disks to template (OPENNEBULA_MANAGED)
 
-                template = t[:template]
+                STDOUT.print "\n    The existing disks and networks in the template"\
+                             " are being imported, please be patient..."
+
+                template = t[:template] if !template
 
                 error, template_disks = template.import_vcenter_disks(vc_uuid,
                                                                       dpool,
@@ -198,15 +292,17 @@ def self.import_templates(con_ops, options)
                     t[:one] << template_disks
                 else
                     STDOUT.puts error
+                    template.delete_template if template_copy_ref
                     next
                 end
 
                 error, template_nics = template.import_vcenter_nics(vc_uuid,
                                                                     npool)
                 if error.empty?
                     t[:one] << template_nics
                 else
                     STDOUT.puts error
+                    template.delete_template if template_copy_ref
                     next
                 end
 
@@ -217,7 +313,7 @@ def self.import_templates(con_ops, options)
                 if !use_defaults
 
                     if rp_split.size > 3
-                        STDOUT.print "\n    This template is currently set to "\
+                        STDOUT.print "\n\n    This template is currently set to "\
                             "launch VMs in the default resource pool."\
                             "\n    Press y to keep this behaviour, n to select"\
                             " a new resource pool or d to delegate the choice"\
@@ -238,15 +334,15 @@ def self.import_templates(con_ops, options)
                                         "\"#{list_of_rp}\""
                             input_str+= "\n    Press y to agree, or input a comma"\
                                         " separated list of resource pools to edit "\
-                                        "[y/comma separated list] "
+                                        "([y]/comma separated list) "
                             STDOUT.print input_str
 
                             answer = STDIN.gets.strip
 
-                            if answer.downcase == 'y'
-                                rp_input += rp_split[3] + "|"
-                            else
+                            if !answer.empty? && answer.downcase != 'y'
                                 rp_input += answer + "|"
+                            else
+                                rp_input += rp_split[3] + "|"
                             end
 
                             # Default
@@ -254,49 +350,37 @@ def self.import_templates(con_ops, options)
                                             "to the end user is set to"\
                                             " \"#{default_rp}\"."
                             input_str+= "\n    Press y to agree, or input a new "\
-                                        "resource pool [y/resource pool name] "
+                                        "resource pool ([y]/resource pool name) "
                             STDOUT.print input_str
 
                             answer = STDIN.gets.strip
 
-                            if answer.downcase == 'y'
-                                rp_input += rp_split[4]
-                            else
+                            if !answer.empty? && answer.downcase != 'y'
                                 rp_input += answer
+                            else
+                                rp_input += rp_split[4]
                             end
                         when 'n'
 
                             list_of_rp   = rp_split[-2]
 
-                            input_str = "    The list of available resource pools is:\n"
-
-                            STDOUT.print input_str
-
-                            dashes = ""
-                            100.times do
-                                dashes << "-"
-                            end
-
-                            list_str = "\n    [Index] Resource pool :"\
-                                    "\n    #{dashes}\n"
-
-                            STDOUT.print list_str
+                            STDOUT.print "    The list of available resource pools is:\n\n"
 
-                            index = 1
-                            t[:rp_list].each do |rp|
-                                list_str = "    [#{index}] #{rp[:name]}\n"
+                            t[:rp_list].each do |r|
+                                list_str = "    - #{r[:name]}\n"
-                                index += 1
                                 STDOUT.print list_str
                             end
 
                             input_str = "\n    Please input the new default"\
-                                        " resource pool index in the list (e.g 1): "
+                                        " resource pool name: "
 
                             STDOUT.print input_str
 
                             answer = STDIN.gets.strip
 
-                            t[:one] << "VCENTER_RESOURCE_POOL=\"#{t[:rp_list][answer.to_i - 1][:name]}\"\n"
+                            t[:one] << "VCENTER_RESOURCE_POOL=\"#{answer}\"\n"
                         end
                     end
                 end
@@ -314,6 +398,7 @@ def self.import_templates(con_ops, options)
 
                 if ::OpenNebula.is_error?(rc)
                     STDOUT.puts "    Error creating template: #{rc.message}\n"
+                    template.delete_template if template_copy_ref
                 else
                     STDOUT.puts "    OpenNebula template #{one_t.id} created!\n"
                 end
@@ -343,20 +428,6 @@ def self.import_networks(con_ops, options)
 
         dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
 
-        # OpenNebula's ClusterPool
-        cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
-
-        if cpool.respond_to?(:message)
-            raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
-        end
-
-        cpool.info
-
-        cluster_list = {}
-        cpool.each do |c|
-            cluster_list[c["ID"]] = c["NAME"]
-        end
-
         # OpenNebula's VirtualNetworkPool
         npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
 
@@ -364,7 +435,7 @@ def self.import_networks(con_ops, options)
             raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
         end
 
-        rs = dc_folder.get_unimported_networks(npool)
+        rs = dc_folder.get_unimported_networks(npool, options[:vcenter])
 
         STDOUT.print "done!\n"
 
@@ -393,22 +464,6 @@ def self.import_networks(con_ops, options)
                     STDOUT.print print_str
 
                     next if STDIN.gets.strip.downcase != 'y'
-
-                    if cluster_list.size > 1
-                        STDOUT.print "\n    In which OpenNebula cluster do you want the network to be included?\n "
-
-                        cluster_list_str = "\n"
-                        cluster_list.each do |key, value|
-                            cluster_list_str << "      - ID: " << key << " - NAME: " << value << "\n"
-                        end
-
-                        STDOUT.print "\n    #{cluster_list_str}"
-                        STDOUT.print "\n    Specify the ID of the cluster or Enter to use the default cluster: "
-
-                        answer = STDIN.gets.strip
-                        one_cluster_id = answer if !answer.empty?
-                    end
-
                 end
 
                 size="255"
@@ -499,11 +554,7 @@ def self.import_networks(con_ops, options)
 
                 one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
 
-                if one_cluster_id
-                    rc = one_vn.allocate(n[:one],one_cluster_id.to_i)
-                else
-                    rc = one_vn.allocate(n[:one])
-                end
+                rc = one_vn.allocate(n[:one])
 
                 if ::OpenNebula.is_error?(rc)
                     STDOUT.puts "\n    Error creating virtual network: " +
@@ -538,27 +589,20 @@ def self.import_datastore(con_ops, options)
 
         dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
 
-        # OpenNebula's ClusterPool
-        cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
+        dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
 
-        if cpool.respond_to?(:message)
-            raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
+        if dpool.respond_to?(:message)
+            raise "Could not get OpenNebula DatastorePool: #{dpool.message}"
         end
 
-        cpool.info
-
-        cluster_list = {}
-        cpool.each do |c|
-            cluster_list[c["ID"]] = c["NAME"]
-        end
-
-        hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
+        # Get OpenNebula's host pool
+        hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
 
         if hpool.respond_to?(:message)
-            raise "Could not get OpenNebula DatastorePool: #{hpool.message}"
+            raise "Could not get OpenNebula HostPool: #{hpool.message}"
         end
 
-        rs = dc_folder.get_unimported_datastores(hpool)
+        rs = dc_folder.get_unimported_datastores(dpool, options[:vcenter], hpool)
 
         STDOUT.print "done!\n"
 
@@ -585,21 +629,6 @@ def self.import_datastore(con_ops, options)
                                     "    Import this as Datastore [y/n]? "
 
                     next if STDIN.gets.strip.downcase != 'y'
-
-                    if cluster_list.size > 1
-                        STDOUT.print "\n    In which OpenNebula cluster do you want the datastore to be included?\n "
-
-                        cluster_list_str = "\n"
-                        cluster_list.each do |key, value|
-                            cluster_list_str << "      - ID: " << key << " - NAME: " << value << "\n"
-                        end
-
-                        STDOUT.print "\n    #{cluster_list_str}"
-                        STDOUT.print "\n    Specify the ID of the cluster or Enter to use the default cluster: "
-
-                        answer = STDIN.gets.strip
-                        one_cluster_id = answer if !answer.empty?
-                    end
                 end
 
                 one_d = VCenterDriver::VIHelper.new_one_item(OpenNebula::Datastore)
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/network.rb b/src/vmm_mad/remotes/lib/vcenter_driver/network.rb
index 5baaeb20d2..1cb09c4775 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/network.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/network.rb
@@ -69,21 +69,24 @@ class Network
 
 
     def self.to_one_template(network_name, network_ref, network_type,
-                             ccr_ref, ccr_name, vcenter_uuid)
+                             ccr_ref, ccr_name, vcenter_uuid,
+                             vcenter_instance_name, dc_name)
+
         one_tmp = {}
-        one_tmp[:name] = "#{network_name} - #{ccr_name}"
+        network_import_name = "[#{vcenter_instance_name} - #{dc_name}] #{network_name} - #{ccr_name.tr(" ", "_")}"
+        one_tmp[:name] = network_import_name
         one_tmp[:bridge] = network_name
         one_tmp[:type] = network_type
         one_tmp[:cluster] = ccr_name
         one_tmp[:vcenter_ccr_ref] = ccr_ref
-        one_tmp[:one] = to_one(network_name, network_ref, network_type,
-                             ccr_ref, ccr_name, vcenter_uuid)
+        one_tmp[:one] = to_one(network_import_name, network_name, network_ref, network_type,
+                             ccr_ref, vcenter_uuid)
         return one_tmp
     end
 
-    def self.to_one(network_name, network_ref, network_type,
-                    ccr_ref, ccr_name, vcenter_uuid)
-        template = "NAME=\"#{network_name} - #{ccr_name}\"\n"\
+    def self.to_one(network_import_name, network_name, network_ref, network_type,
+                    ccr_ref, vcenter_uuid)
+        template = "NAME=\"#{network_import_name}\"\n"\
                    "BRIDGE=\"#{network_name}\"\n"\
                    "VN_MAD=\"dummy\"\n"\
                    "VCENTER_PORTGROUP_TYPE=\"#{network_type}\"\n"\
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
index c14d8de3d9..9a0bef6c7c 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
@@ -24,7 +24,7 @@ class VirtualMachineFolder
         VIClient.get_entities(@item, "VirtualMachine").each do |item|
             if item.config.template
                 item_name = item._ref
-                @items[item_name.to_sym] = VirtualMachine.new(item)
+                @items[item_name.to_sym] = Template.new(item)
             end
         end
     end
@@ -44,13 +44,7 @@ class VirtualMachineFolder
     end
 end # class VirtualMachineFolder
 
-class VirtualMachine
-    VM_PREFIX_DEFAULT = "one-$i-"
-
-    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
-    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE
-
-    VM_SHUTDOWN_TIMEOUT = 600 #10 minutes til poweroff hard
+class Template
 
     attr_accessor :item
 
@@ -77,6 +71,648 @@ class VirtualMachine
         end
     end
 
+    def get_dc
+        item = @item
+
+        while !item.instance_of? RbVmomi::VIM::Datacenter
+            item = item.parent
+            if item.nil?
+                raise "Could not find the parent Datacenter"
+            end
+        end
+
+        Datacenter.new(item)
+    end
+
+    def delete_template
+        @item.Destroy_Task.wait_for_completion
+    end
+
+    def get_vcenter_instance_uuid
+        @vi_client.vim.serviceContent.about.instanceUuid rescue nil
+    end
+
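+    # Clone this template into a new template named template_name (or
+    # "one-<template name>" when no name is given). If the clone fails with a
+    # DuplicateName error, the existing template is destroyed and the clone is
+    # retried once. Returns [error, template_ref], with error nil on success.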
+    def create_template_copy(template_name)
+        error = nil
+        template_ref = nil
+
+        template_name = "one-#{self['name']}" if template_name.empty?
+
+        relocate_spec_params = {}
+        relocate_spec_params[:pool] = get_rp
+        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)
+
+        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
+            :location => relocate_spec,
+            :powerOn  => false,
+            :template => false
+        })
+
+        template = nil
+        begin
+            template = @item.CloneVM_Task(:folder => @item.parent,
+                                          :name   => template_name,
+                                          :spec   => clone_spec).wait_for_completion
+            template_ref = template._ref
+        rescue Exception => e
+            if !e.message.start_with?('DuplicateName')
+                error = "Could not create the template clone. Reason: #{e.message}"
+                return error, nil
+            end
+
+            dc = get_dc
+            vm_folder = dc.vm_folder
+            vm_folder.fetch!
+            vm = vm_folder.items
+                    .select{|k,v| v.item.name == template_name}
+                    .values.first.item rescue nil
+
+            if vm
+                begin
+                    vm.Destroy_Task.wait_for_completion
+                    template = @item.CloneVM_Task(:folder => @item.parent,
+                                                  :name   => template_name,
+                                                  :spec   => clone_spec).wait_for_completion
+                    template_ref = template._ref
+                rescue Exception => e
+                    error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}"
+                end
+            else
+                error = "Could not create the template clone. Reason: #{e.message}"
+            end
+        end
+
+        return error, template_ref
+    end
+
+    # Linked Clone over existing template
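+    # Every disk that has no snapshot parent yet is removed and re-added with
+    # a fresh backing file whose parent is the original VMDK, so future linked
+    # clones share the base disk. Returns [error, use_linked_clones].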
+    def create_delta_disks
+
+        begin
+            disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
+            disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
+        rescue
+            error = "Cannot extract the existing disks from the template."
+            use_linked_clones = false
+            return error, use_linked_clones
+        end
+
+        if !disk_without_snapshots.empty?
+
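+            # vCenter does not allow reconfiguring a template directly, so the
+            # template is temporarily converted into a VM, the delta disks are
+            # added, and it is then marked as a template again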
+            begin
+                if self['config.template']
+                    @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
+                end
+            rescue Exception => e
+                @item.MarkAsTemplate()
+                error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
+                use_linked_clones = false
+                return error, use_linked_clones
+            end
+
+            begin
+                spec = {}
+                spec[:deviceChange] = []
+
+                disk_without_snapshots.each do |disk|
+                    remove_disk_spec = { :operation => :remove, :device => disk }
+                    spec[:deviceChange] << remove_disk_spec
+
+                    add_disk_spec = { :operation => :add,
+                                    :fileOperation => :create,
+                                    :device => disk.dup.tap { |x|
+                                            x.backing = x.backing.dup
+                                            x.backing.fileName = "[#{disk.backing.datastore.name}]"
+                                            x.backing.parent = disk.backing
+                                    }
+                    }
+                    spec[:deviceChange] << add_disk_spec
+                end
+
+                @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
+            rescue Exception => e
+                error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
+                use_linked_clones = false
+                return error, use_linked_clones
+            end
+
+            begin
+                @item.MarkAsTemplate()
+            rescue
+                error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
+                use_linked_clones = false
+                return error, use_linked_clones
+            end
+
+            error = nil
+            use_linked_clones = true
+            return error, use_linked_clones
+        else
+            # Template already has delta disks
+            error = nil
+            use_linked_clones = true
+            return error, use_linked_clones
+        end
+    end
+
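+    # Register each disk found in the template as an image in the OpenNebula
+    # image datastore that maps the disk's vCenter datastore, building the
+    # DISK=[...] fragments appended to the OpenNebula template. Any image
+    # allocated so far is deleted (rollback) if a disk cannot be imported.
+    # A sketch of the fragment produced per disk (the ID is an example):
+    #
+    #   DISK=[
+    #   IMAGE_ID="42",
+    #   OPENNEBULA_MANAGED="NO"
+    #   ]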
+    def import_vcenter_disks(vc_uuid, dpool, ipool)
+        disk_info = ""
+        error = ""
+
+        begin
+            lock #Lock import operation, to avoid concurrent creation of images
+
+            ccr_ref = self["runtime.host.parent._ref"]
+
+            #Get disks and info required
+            vc_disks = get_vcenter_disks
+
+            # Track allocated images
+            allocated_images = []
+
+            vc_disks.each do |disk|
+                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_ccr(disk[:datastore]._ref,
+                                                                                        ccr_ref,
+                                                                                        vc_uuid,
+                                                                                        dpool)
+                if datastore_found.nil?
+                    error = "\n    ERROR: datastore #{disk[:datastore].name} has to be imported first as an image datastore!\n"
+
+                    #Rollback delete disk images
+                    allocated_images.each do |i|
+                        i.delete
+                    end
+
+                    break
+                end
+
+                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
+                                                                                disk[:path],
+                                                                                disk[:type], ipool)
+                #Image is already registered in OpenNebula
+                if image_import[:one]
+                    # This is the disk info
+                    disk_info << "DISK=[\n"
+                    disk_info << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
+                    disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
+                    disk_info << "]\n"
+                elsif !image_import[:template].empty?
+                    # Then the image is created as it's not in the datastore
+                    one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
+
+                    allocated_images << one_i
+
+                    rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)
+
+                    if ::OpenNebula.is_error?(rc)
+                        error = "    Error creating disk from template: #{rc.message}. Cannot import the template\n"
+
+                        #Rollback delete disk images
+                        allocated_images.each do |i|
+                            i.delete
+                        end
+
+                        break
+                    end
+
+                    #Add info for One template
+                    one_i.info
+                    disk_info << "DISK=[\n"
+                    disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
+                    disk_info << "IMAGE_UNAME=\"#{one_i["UNAME"]}\",\n"
+                    disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
+                    disk_info << "]\n"
+                end
+            end
+
+        rescue Exception => e
+            error = "There was an error trying to create an image for a disk in the vCenter template. Reason: #{e.message}"
+        ensure
+            unlock
+        end
+
+        return error, disk_info
+    end
+
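+    # Counterpart of import_vcenter_disks for NICs: reuse the virtual network
+    # if the port group was already imported, otherwise create one with a
+    # default ETHER address range of size 255, rolling back the networks
+    # allocated so far on failure. Returns [error, nic_info].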
+    def import_vcenter_nics(vc_uuid, npool)
+        nic_info = ""
+        error = ""
+
+        begin
+            lock #Lock import operation, to avoid concurrent creation of images
+
+            ccr_ref  = self["runtime.host.parent._ref"]
+            ccr_name = self["runtime.host.parent.name"]
+
+            # Needed by Network.to_one_template's extended signature
+            vcenter_instance_name = @vi_client.vim.host
+            dc_name  = get_dc.item.name
+
+            #Get the template's NICs and required info
+            vc_nics = get_vcenter_nics
+
+            # Track allocated networks
+            allocated_networks = []
+
+            vc_nics.each do |nic|
+
+                network_found = VCenterDriver::Network.get_one_vnet_ds_by_ref_and_ccr(nic[:net_ref],
+                                                                                    ccr_ref,
+                                                                                    vc_uuid,
+                                                                                    npool)
+                #Network already exists in OpenNebula
+                if network_found
+                    # This is the existing nic info
+                    nic_info << "NIC=[\n"
+                    nic_info << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
+                    nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
+                    nic_info << "]\n"
+                else
+                    # Then the network has to be created as it's not in OpenNebula
+                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
+
+                    allocated_networks << one_vn
+
+                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
+                                                                      nic[:net_ref],
+                                                                      nic[:pg_type],
+                                                                      ccr_ref,
+                                                                      ccr_name,
+                                                                      vc_uuid,
+                                                                      vcenter_instance_name,
+                                                                      dc_name)
+
+                    # By default add an ethernet range to network size 255
+                    ar_str = ""
+                    ar_str << "AR=[\n"
+                    ar_str << "TYPE=\"ETHER\",\n"
+                    ar_str << "SIZE=\"255\"\n"
+                    ar_str << "]\n"
+                    one_vnet[:one] << ar_str
+
+                    rc = one_vn.allocate(one_vnet[:one])
+
+                    if ::OpenNebula.is_error?(rc)
+                        error = "    Error creating virtual network from template: #{rc.message}. Cannot import the template\n"
+
+                        #Rollback, delete virtual networks
+                        allocated_networks.each do |n|
+                            n.delete
+                        end
+
+                        break
+                    end
+
+                    #Add info for One template
+                    one_vn.info
+                    nic_info << "NIC=[\n"
+                    nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
+                    nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
+                    nic_info << "]\n"
+                end
+            end
+
+        rescue Exception => e
+            error = "There was an error trying to create a virtual network for a NIC in the vCenter template. Reason: #{e.message}"
+        ensure
+            unlock
+        end
+
+        return error, nic_info
+    end
+
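+    # Collect the template's virtual disks and ISO-backed CDROMs as hashes
+    # holding the device, its datastore, the backing file path and a type
+    # ("OS" for disks, "CDROM" for ISOs)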
+    def get_vcenter_disks
+
+        disks = []
+        @item["config.hardware.device"].each do |device|
+            disk = {}
+            if is_disk_or_iso?(device)
+                disk[:device]    = device
+                disk[:datastore] = device.backing.datastore
+                disk[:path]      = device.backing.fileName
+                disk[:path_wo_ds]= disk[:path].sub(/^\[(.*?)\] /, "")
+                disk[:type]      = is_disk?(device) ? "OS" : "CDROM"
+                disks << disk
+            end
+        end
+
+        return disks
+    end
+
+    def get_vcenter_nics
+        nics = []
+        @item["config.hardware.device"].each do |device|
+            nic = {}
+            if is_nic?(device)
+                nic[:net_name]  = device.backing.network.name
+                nic[:net_ref]   = device.backing.network._ref
+                nic[:pg_type]   = VCenterDriver::Network.get_network_type(device)
+                nics << nic
+            end
+        end
+        return nics
+    end
+
+    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
+    def is_disk_or_cdrom?(device)
+        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
+        is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
+        is_disk || is_cdrom
+    end
+
+    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
+    def is_disk_or_iso?(device)
+        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
+        is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
+        is_disk || is_iso
+    end
+
+    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
+    def is_disk?(device)
+        !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
+    end
+
+    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
+    def is_nic?(device)
+        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
+    end
+
+    # @return RbVmomi::VIM::ResourcePool, the cluster's root resource pool
+    def get_rp
+        self['runtime.host.parent.resourcePool']
+    end
+
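+    # Build the hash the importers consume: an import name of the form
+    # "[<vcenter instance> - <datacenter>] <template> - <cluster>", the vCenter
+    # refs, the resource pool data and the template string built by to_one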
+    def to_one_template(template, cluster_ref, cluster_name, has_nics_and_disks, rp, rp_list, vcenter_uuid, vcenter_instance_name, dc_name)
+
+        template_ref  = template['_ref']
+        template_name = template["name"]
+
+        one_tmp = {}
+        one_tmp[:name]                  = "[#{vcenter_instance_name} - #{dc_name}] #{template_name} - #{cluster_name}"
+        one_tmp[:template_name]         = template_name
+        one_tmp[:vcenter_ccr_ref]       = cluster_ref
+        one_tmp[:vcenter_ref]           = template_ref
+        one_tmp[:vcenter_instance_uuid] = vcenter_uuid
+        one_tmp[:cluster_name]          = cluster_name
+        one_tmp[:rp]                    = rp
+        one_tmp[:rp_list]               = rp_list
+        one_tmp[:template]              = template
+        one_tmp[:import_disks_and_nics] = has_nics_and_disks
+
+        one_tmp[:one]                   = to_one(true, vcenter_uuid, cluster_ref, cluster_name, vcenter_instance_name, dc_name)
+        return one_tmp
+    end
+
+    def vm_to_one(vm_name)
+
+        str = "NAME   = \"#{vm_name}\"\n"\
+              "CPU    = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
+              "vCPU   = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
+              "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
+              "HYPERVISOR = \"vcenter\"\n"\
+              "SCHED_REQUIREMENTS=\"ID=\\\"#{@vm_info["host_id"]}\\\"\"\n"\
+              "CONTEXT = [\n"\
+              "    NETWORK = \"YES\",\n"\
+              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
+              "]\n"\
+              "VCENTER_INSTANCE_ID =\"#{@vm_info["vc_uuid"]}\"\n"
+
+        str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
+        str << "IMPORT_STATE =\"#{@state}\"\n"
+
+        vnc_port = nil
+        keymap = nil
+
+        @vm_info["config.extraConfig"].each do |xtra|
+            if xtra[:key].downcase=="remotedisplay.vnc.port"
+                vnc_port = xtra[:value]
+            end
+
+            if xtra[:key].downcase=="remotedisplay.vnc.keymap"
+                keymap = xtra[:value]
+            end
+        end
+
+        if !@vm_info["config.extraConfig"].empty?
+            str << "GRAPHICS = [\n"\
+                   "  TYPE     =\"vnc\",\n"
+            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
+            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
+            str << "  LISTEN   =\"0.0.0.0\"\n"
+            str << "]\n"
+        end
+
+        if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
+            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
+                " from Cluster #{@vm_info["cluster_name"]}\"\n"
+        else
+            notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
+            str << "DESCRIPTION = \"#{notes}\"\n"
+        end
+
+        case @vm_info["guest.guestFullName"]
+            when /CentOS/i
+                str << "LOGO=images/logos/centos.png\n"
+            when /Debian/i
+                str << "LOGO=images/logos/debian.png\n"
+            when /Red Hat/i
+                str << "LOGO=images/logos/redhat.png\n"
+            when /Ubuntu/i
+                str << "LOGO=images/logos/ubuntu.png\n"
+            when /Windows XP/i
+                str << "LOGO=images/logos/windowsxp.png\n"
+            when /Windows/i
+                str << "LOGO=images/logos/windows8.png\n"
+            when /Linux/i
+                str << "LOGO=images/logos/linux.png\n"
+        end
+
+        return str
+    end
+
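+    # Render the OpenNebula template string for this VM or template. Note the
+    # Ruby signature: vcenter_instance_name and dc_name are required even
+    # though they follow optional parameters, so they must always be supplied.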
+    def to_one(template=false, vc_uuid=nil, ccr_ref=nil, ccr_name=nil, vcenter_instance_name, dc_name)
+
+        if !ccr_ref && !ccr_name
+            cluster  = @item["runtime.host"].parent
+            ccr_name = cluster.name
+            ccr_ref  = cluster._ref
+        end
+
+        vc_uuid  = self["_connection.serviceContent.about.instanceUuid"] if !vc_uuid
+
+        # Get info of the host where the VM/template is located
+        host_id = nil
+        one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
+                                                       "TEMPLATE/VCENTER_CCR_REF",
+                                                       ccr_ref,
+                                                       vc_uuid)
+
+        num_cpu, memory, extraconfig, annotation, guest_fullname =
+            @item.collect("config.hardware.numCPU",
+                          "config.hardware.memoryMB",
+                          "config.extraConfig",
+                          "config.annotation",
+                          "guest.guestFullName")
+        host_id = one_host["ID"] if one_host
+
+        name = ""
+        if template
+            name << "[#{vcenter_instance_name} - #{dc_name}] #{self["name"]} - #{ccr_name.tr(" ", "_")}"
+        else
+            name << "#{self["name"]} - #{ccr_name.tr(" ", "_")}"
+        end
+
+        str = "NAME   = \"#{name}\"\n"\
+              "CPU    = \"#{num_cpu}\"\n"\
+              "vCPU   = \"#{num_cpu}\"\n"\
+              "MEMORY = \"#{memory}\"\n"\
+              "HYPERVISOR = \"vcenter\"\n"\
+              "SCHED_REQUIREMENTS=\"ID=\\\"#{host_id}\\\"\"\n"\
+              "CONTEXT = [\n"\
+              "    NETWORK = \"YES\",\n"\
+              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
+              "]\n"\
+              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"
+
+        if !template
+            str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
+            str << "IMPORT_STATE =\"#{@state}\"\n"
+        end
+
+        if template
+            str << "VCENTER_TEMPLATE_REF =\"#{self['_ref']}\"\n"
+            str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"
+        end
+
+        vnc_port = nil
+        keymap = nil
+
+        if !template
+            extraconfig.each do |xtra|
+
+                if xtra[:key].downcase=="remotedisplay.vnc.port"
+                    vnc_port = xtra[:value]
+                end
+
+                if xtra[:key].downcase=="remotedisplay.vnc.keymap"
+                    keymap = xtra[:value]
+                end
+            end
+        end
+
+        if !extraconfig.empty?
+            str << "GRAPHICS = [\n"\
+                   "  TYPE     =\"vnc\",\n"
+            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
+            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
+            str << "  LISTEN   =\"0.0.0.0\"\n"
+            str << "]\n"
+        end
+
+        if annotation.nil? || annotation.empty?
+            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
+                " from Cluster #{ccr_name}\"\n"
+        else
+            notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
+            str << "DESCRIPTION = \"#{notes}\"\n"
+        end
+
+        case guest_fullname
+            when /CentOS/i
+                str << "LOGO=images/logos/centos.png\n"
+            when /Debian/i
+                str << "LOGO=images/logos/debian.png\n"
+            when /Red Hat/i
+                str << "LOGO=images/logos/redhat.png\n"
+            when /Ubuntu/i
+                str << "LOGO=images/logos/ubuntu.png\n"
+            when /Windows XP/i
+                str << "LOGO=images/logos/windowsxp.png\n"
+            when /Windows/i
+                str << "LOGO=images/logos/windows8.png\n"
+            when /Linux/i
+                str << "LOGO=images/logos/linux.png\n"
+        end
+
+        return str
+    end
+
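+    # Importer helper: wraps to_one_template with the cluster lookup and the
+    # resource pool question, caching the resource pool data per cluster in
+    # rp_cache to avoid repeated vCenter queries. Returns nil on any error.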
+    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})
+
+        begin
+            template_ccr  = template['runtime.host.parent']
+            template_ccr_ref = template_ccr._ref
+            template_ccr_name = template_ccr.name
+
+            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name
+
+            if !dc_name
+                dc = template.get_dc
+                dc_name = dc.item.name
+            end
+            # Check if the template has NICs or disks to be imported later
+            has_nics_and_disks = true
+
+            # Get the resource pools for the template's cluster (cached per cluster)
+
+            if !rp_cache[template_ccr_name]
+                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
+                rp_list = tmp_cluster.get_resource_pool_list
+                rp = ""
+                if !rp_list.empty?
+                    rp_name_list = []
+                    rp_list.each do |rp_hash|
+                        rp_name_list << rp_hash[:name]
+                    end
+                    rp =  "O|list|Which resource pool do you want this VM to run in? "
+                    rp << "|#{rp_name_list.join(",")}" #List of RP
+                    rp << "|#{rp_name_list.first}" #Default RP
+                end
+                # Cache the list too, otherwise rp_list would be nil on a cache hit
+                rp_cache[template_ccr_name] = { :rp => rp, :rp_list => rp_list }
+            end
+            rp      = rp_cache[template_ccr_name][:rp]
+            rp_list = rp_cache[template_ccr_name][:rp_list]
+
+            object = template.to_one_template(template,
+                                            template_ccr_ref,
+                                            template_ccr_name,
+                                            has_nics_and_disks,
+                                            rp,
+                                            rp_list,
+                                            vcenter_uuid,
+                                            vcenter_instance_name,
+                                            dc_name)
+
+            return object
+
+        rescue
+            return nil
+        end
+    end
+
+    # TODO check with uuid
+    def self.new_from_ref(ref, vi_client)
+        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
+    end
+
+end
+
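+# VirtualMachine inherits the template/import helpers from Template and adds
+# the deployment, lifecycle and monitoring logic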
+class VirtualMachine < Template
+    VM_PREFIX_DEFAULT = "one-$i-"
+
+    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
+    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE
+
+    VM_SHUTDOWN_TIMEOUT = 600 #10 minutes until hard poweroff
+
+    attr_accessor :item
+
+    attr_accessor :vm_info
+
+    include Memoize
+
+    def initialize(item=nil, vi_client=nil)
+        @item = item
+        @vi_client = vi_client
+        @locking = true
+        @vm_info = nil
+    end
+
     ############################################################################
     ############################################################################
 
@@ -95,7 +731,7 @@ class VirtualMachine
     # The OpenNebula VM
     # @return OpenNebula::VirtualMachine or XMLElement
     def one_item
-        if @one_item.nil?
+        if !@one_item
             vm_id = get_vm_id
 
             raise "Unable to find vm_id." if vm_id.nil?
@@ -471,7 +1107,7 @@ class VirtualMachine
         vcenter_disks = get_vcenter_disks
 
         # Create an array with the paths of the disks in vcenter template
-        template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
+        template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
         template_disks = template.get_vcenter_disks
         template_disks_vector = []
         template_disks.each_with_index do |d, index|
@@ -1088,11 +1724,6 @@ class VirtualMachine
         end
     end
 
-    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
-    def is_nic?(device)
-        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
-    end
-
     def disks_in_onevm
         onevm_disks_vector = []
         disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")
@@ -1381,86 +2012,6 @@ class VirtualMachine
         return nics
     end
 
-    def import_vcenter_disks(vc_uuid, dpool, ipool)
-        disk_info = ""
-        error = ""
-
-        begin
-            lock #Lock import operation, to avoid concurrent creation of images
-
-            ccr_ref = self["runtime.host.parent._ref"]
-
-            #Get disks and info required
-            vc_disks = get_vcenter_disks
-
-            # Track allocated images
-            allocated_images = []
-
-            vc_disks.each do |disk|
-
-                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_ccr(disk[:datastore]._ref,
-                                                                                        ccr_ref,
-                                                                                        vc_uuid,
-                                                                                        dpool)
-                if datastore_found.nil?
-                    error = "    Error datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n"
-
-                    #Rollback delete disk images
-                    allocated_images.each do |i|
-                        i.delete
-                    end
-
-                    break
-                end
-
-                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
-                                                                                disk[:path],
-                                                                                disk[:type], ipool)
-                #Image is already in the datastore
-                if image_import[:one]
-                    # This is the disk info
-                    disk_info << "DISK=[\n"
-                    disk_info << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
-                    disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
-                    disk_info << "]\n"
-                elsif !image_import[:template].empty?
-                    # Then the image is created as it's not in the datastore
-                    one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
-
-                    allocated_images << one_i
-
-                    rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)
-
-                    if ::OpenNebula.is_error?(rc)
-                        error = "    Error creating disk from template: #{rc.message}. Cannot import the template\n"
-
-                        #Rollback delete disk images
-                        allocated_images.each do |i|
-                            i.delete
-                        end
-
-                        break
-                    end
-
-                    #Add info for One template
-                    one_i.info
-                    disk_info << "DISK=[\n"
-                    disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
-                    disk_info << "IMAGE_UNAME=\"#{one_i["UNAME"]}\",\n"
-                    disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
-                    disk_info << "]\n"
-                end
-            end
-
-        rescue Exception => e
-            error = "There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}"
-        ensure
-            unlock
-        end
-
-        return error, disk_info
-    end
-
     def remove_poweroff_detached_vcenter_nets(networks)
         esx_host = VCenterDriver::ESXHost.new_from_ref(@item.runtime.host._ref, vi_client)
         dc = cluster.get_dc # Get datacenter
@@ -1518,106 +2069,6 @@ class VirtualMachine
         end
     end
 
-    def import_vcenter_nics(vc_uuid, npool)
-        nic_info = ""
-        error = ""
-
-        begin
-            lock #Lock import operation, to avoid concurrent creation of images
-
-            ccr_ref  = self["runtime.host.parent._ref"]
-            ccr_name = self["runtime.host.parent.name"]
-
-            #Get disks and info required
-            vc_nics = get_vcenter_nics
-
-            # Track allocated networks
-            allocated_networks = []
-
-            vc_nics.each do |nic|
-
-                network_found = VCenterDriver::Network.get_one_vnet_ds_by_ref_and_ccr(nic[:net_ref],
-                                                                                    ccr_ref,
-                                                                                    vc_uuid,
-                                                                                    npool)
-                #Network is already in the datastore
-                if network_found
-                    # This is the existing nic info
-                    nic_info << "NIC=[\n"
-                    nic_info << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
-                    nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
-                    nic_info << "]\n"
-                else
-                    # Then the network has to be created as it's not in OpenNebula
-                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
-
-                    allocated_networks << one_vn
-
-                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
-                                                                    nic[:net_ref],
-                                                                    nic[:pg_type],
-                                                                    ccr_ref,
-                                                                    ccr_name,
-                                                                    vc_uuid)
-
-                    # By default add an ethernet range to network size 255
-                    ar_str = ""
-                    ar_str << "AR=[\n"
-                    ar_str << "TYPE=\"ETHER\",\n"
-                    ar_str << "SIZE=\"255\"\n"
-                    ar_str << "]\n"
-                    one_vnet[:one] << ar_str
-
-                    rc = one_vn.allocate(one_vnet[:one])
-
-                    if ::OpenNebula.is_error?(rc)
-                        error = "    Error creating virtual network from template: #{rc.message}. Cannot import the template\n"
-
-                        #Rollback, delete virtual networks
-                        allocated_networks.each do |n|
-                            n.delete
-                        end
-
-                        break
-                    end
-
-                    #Add info for One template
-                    one_vn.info
-                    nic_info << "NIC=[\n"
-                    nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
-                    nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
-                    nic_info << "]\n"
-                end
-            end
-
-        rescue Exception => e
-            error = "There was an error trying to create a virtual network for network in vcenter template. Reason: #{e.message}"
-        ensure
-            unlock
-        end
-
-        return error, nic_info
-    end
-
-    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
-    def is_disk_or_cdrom?(device)
-        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
-        is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
-        is_disk || is_cdrom
-    end
-
-    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
-    def is_disk_or_iso?(device)
-        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
-        is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
-        is_disk || is_iso
-    end
-
-    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
-    def is_disk?(device)
-        !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
-    end
-
     def find_free_controller(position=0)
         free_scsi_controllers = []
         scsi_schema           = {}
@@ -1871,131 +2322,21 @@ class VirtualMachine
     # monitoring
     ############################################################################
 
-    def to_one(template=false)
-        cluster  = self["runtime.host.parent.name"]
-        ccr_ref  = self["runtime.host.parent._ref"]
-        vc_uuid  = self["_connection.serviceContent.about.instanceUuid"]
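+    # Monitoring now consumes data gathered in bulk by the poller: esx_host_cpu
+    # maps each ESX host ref to its CPU MHz, and stats holds the PerfManager
+    # samples per VM item, so no extra vSphere round trips happen here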
+    def monitor(esx_host_cpu, stats)
 
-        # Get info of the host where the VM/template is located
-        host_id = nil
-        one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
-                                                       "TEMPLATE/VCENTER_CCR_REF",
-                                                       ccr_ref,
-                                                       vc_uuid)
-        host_id = one_host["ID"] if one_host
-
-        str = "NAME   = \"#{self["name"]} - #{cluster}\"\n"\
-              "CPU    = \"#{@item["config.hardware.numCPU"]}\"\n"\
-              "vCPU   = \"#{@item["config.hardware.numCPU"]}\"\n"\
-              "MEMORY = \"#{@item["config.hardware.memoryMB"]}\"\n"\
-              "HYPERVISOR = \"vcenter\"\n"\
-              "SCHED_REQUIREMENTS=\"ID=\\\"#{host_id}\\\"\"\n"\
-              "CONTEXT = [\n"\
-              "    NETWORK = \"YES\",\n"\
-              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
-              "]\n"\
-              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"
-
-        if !template
-            str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
-            str << "IMPORT_STATE =\"#{@state}\"\n"
-        end
-
-        if template
-            str << "VCENTER_TEMPLATE_REF =\"#{self['_ref']}\"\n"
-            str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"
-        end
-
-        vnc_port = nil
-        keymap = nil
-
-        if !template
-            @item["config.extraConfig"].select do |xtra|
-
-                if xtra[:key].downcase=="remotedisplay.vnc.port"
-                    vnc_port = xtra[:value]
-                end
-
-                if xtra[:key].downcase=="remotedisplay.vnc.keymap"
-                    keymap = xtra[:value]
-                end
-            end
-        end
-
-        if @item["config.extraConfig"].size > 0
-            str << "GRAPHICS = [\n"\
-                   "  TYPE     =\"vnc\",\n"
-            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
-            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
-            str << "  LISTEN   =\"0.0.0.0\"\n"
-            str << "]\n"
-        end
-
-        if @item["config.annotation"].nil? || @item["config.annotation"].empty?
-            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
-                " from Cluster #{cluster}\"\n"
-        else
-            notes = @item["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
-            str << "DESCRIPTION = \"#{notes}\"\n"
-        end
-
-        case self["guest.guestFullName"]
-            when /CentOS/i
-                str << "LOGO=images/logos/centos.png\n"
-            when /Debian/i
-                str << "LOGO=images/logos/debian.png\n"
-            when /Red Hat/i
-                str << "LOGO=images/logos/redhat.png\n"
-            when /Ubuntu/i
-                str << "LOGO=images/logos/ubuntu.png\n"
-            when /Windows XP/i
-                str << "LOGO=images/logos/windowsxp.png\n"
-            when /Windows/i
-                str << "LOGO=images/logos/windows8.png\n"
-            when /Linux/i
-                str << "LOGO=images/logos/linux.png\n"
-        end
-
-        return str
-    end
-
-    def to_one_template(template, has_nics_and_disks, rp, rp_list, vcenter_uuid)
-
-        template_name = template['name']
-        template_ref  = template['_ref']
-        template_ccr  = template['runtime.host.parent']
-        cluster_name  = template['runtime.host.parent.name']
-
-        one_tmp = {}
-        one_tmp[:name]                  = "#{template_name} - #{cluster_name}"
-        one_tmp[:template_name]         = template_name
-        one_tmp[:vcenter_ccr_ref]       = template_ccr._ref
-        one_tmp[:one]                   = to_one(true)
-        one_tmp[:vcenter_ref]           = template_ref
-        one_tmp[:vcenter_instance_uuid] = vcenter_uuid
-        one_tmp[:cluster_name]          = cluster_name
-        one_tmp[:rp]                    = rp
-        one_tmp[:rp_list]               = rp_list
-        one_tmp[:template]              = template
-        one_tmp[:import_disks_and_nics] = has_nics_and_disks
-        return one_tmp
-    end
-
-    def monitor
         reset_monitor
 
-        @state = state_to_c(self["summary.runtime.powerState"])
+        refresh_rate = 20 #20 seconds between samples (realtime)
 
-        if @state != VM_STATE[:active]
-            reset_monitor
-            return
-        end
+        @state = state_to_c(@vm_info["summary.runtime.powerState"])
 
-        cpuMhz = self["runtime.host.summary.hardware.cpuMhz"].to_f
+        return if @state != VM_STATE[:active]
 
-        @monitor[:used_memory] = self["summary.quickStats.hostMemoryUsage"] * 1024
+        cpuMhz = esx_host_cpu[@vm_info["runtime.host"]._ref]
 
-        used_cpu = self["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
+        @monitor[:used_memory] = @vm_info["summary.quickStats.hostMemoryUsage"].to_f * 1024
+
+        used_cpu = @vm_info["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
         used_cpu = (used_cpu * 100).to_s
         @monitor[:used_cpu]  = sprintf('%.2f', used_cpu).to_s
 
@@ -2004,7 +2345,7 @@ class VirtualMachine
         @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0
 
         guest_ip_addresses = []
-        self["guest.net"].each do |net|
+        @vm_info["guest.net"].each do |net|
             net.ipConfig.ipAddress.each do |ip|
                 guest_ip_addresses << ip.ipAddress
             end if net.ipConfig && net.ipConfig.ipAddress
@@ -2012,121 +2353,76 @@ class VirtualMachine
 
         @guest_ip_addresses = guest_ip_addresses.join(',')
 
-        ########################################################################
-        # PerfManager metrics
-        ########################################################################
-        pm = self['_connection'].serviceInstance.content.perfManager
+        if stats.key?(@item)
+            metrics = stats[@item][:metrics]
 
-        provider = pm.provider_summary(@item)
-
-        refresh_rate = provider.refreshRate
-
-        if !get_vm_id
-            @nettx       = 0
-            @netrx       = 0
-            @diskrdbytes = 0
-            @diskwrbytes = 0
-            @diskrdiops  = 0
-            @diskwriops  = 0
-        else
-            stats = []
-
-            if (one_item["MONITORING/LAST_MON"] && one_item["MONITORING/LAST_MON"].to_i != 0 )
-                #Real time data stores max 1 hour. 1 minute has 3 samples
-                interval = (Time.now.to_i - one_item["MONITORING/LAST_MON"].to_i)
-
-                #If last poll was more than hour ago get 3 minutes,
-                #else calculate how many samples since last poll
-                samples =  interval > 3600 ? 9 : (interval / refresh_rate) + 1
-                max_samples = samples > 0 ? samples : 1
-
-                stats = pm.retrieve_stats(
-                    [@item],
-                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
-                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
-                    'virtualDisk.read','virtualDisk.write'],
-                    {interval:refresh_rate, max_samples: max_samples}
-                )
-            else
-                # First poll, get at least latest 3 minutes = 9 samples
-                stats = pm.retrieve_stats(
-                    [@item],
-                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
-                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
-                    'virtualDisk.read','virtualDisk.write'],
-                    {interval:refresh_rate, max_samples: 9}
-                )
+            nettx_kbpersec = 0
+            if metrics['net.transmitted']
+                metrics['net.transmitted'].each { |sample|
+                    nettx_kbpersec += sample
+                }
             end
 
-            if stats.empty? || stats.first[1][:metrics].empty?
-                @nettx = 0
-                @netrx = 0
-                @diskrdbytes = 0
-                @diskwrbytes = 0
-                @diskrdiops = 0
-                @diskwriops = 0
-            else
-                metrics = stats.first[1][:metrics]
-
-                nettx_kbpersec = 0
-                if metrics['net.transmitted']
-                    metrics['net.transmitted'].each { |sample|
-                        nettx_kbpersec += sample
-                    }
-                end
-
-                netrx_kbpersec = 0
-                if metrics['net.bytesRx']
-                    metrics['net.bytesRx'].each { |sample|
-                        netrx_kbpersec += sample
-                    }
-                end
-
-                read_kbpersec = 0
-                if metrics['virtualDisk.read']
-                    metrics['virtualDisk.read'].each { |sample|
-                        read_kbpersec += sample
-                    }
-                end
-
-                read_iops = 0
-                if metrics['virtualDisk.numberReadAveraged']
-                    metrics['virtualDisk.numberReadAveraged'].each { |sample|
-                        read_iops += sample
-                    }
-                end
-
-                write_kbpersec = 0
-                if metrics['virtualDisk.write']
-                    metrics['virtualDisk.write'].each { |sample|
-                        write_kbpersec += sample
-                    }
-                end
-
-                write_iops = 0
-                if metrics['virtualDisk.numberWriteAveraged']
-                    metrics['virtualDisk.numberWriteAveraged'].each { |sample|
-                        write_iops += sample
-                    }
-                end
-
-                @nettx = (nettx_kbpersec * 1024 * refresh_rate).to_i
-                @netrx = (netrx_kbpersec * 1024 * refresh_rate).to_i
-
-                @diskrdiops = read_iops
-                @diskwriops = write_iops
-                @diskrdbytes = (read_kbpersec * 1024 * refresh_rate).to_i
-                @diskwrbytes = (write_kbpersec * 1024 * refresh_rate).to_i
-                @diskwrbytes = (write_kbpersec * 1024 * refresh_rate).to_i
+            netrx_kbpersec = 0
+            if metrics['net.bytesRx']
+                metrics['net.bytesRx'].each { |sample|
+                    netrx_kbpersec += sample
+                }
             end
+
+            read_kbpersec = 0
+            if metrics['virtualDisk.read']
+                metrics['virtualDisk.read'].each { |sample|
+                    read_kbpersec += sample
+                }
+            end
+
+            read_iops = 0
+            if metrics['virtualDisk.numberReadAveraged']
+                metrics['virtualDisk.numberReadAveraged'].each { |sample|
+                    read_iops += sample
+                }
+            end
+
+            write_kbpersec = 0
+            if metrics['virtualDisk.write']
+                metrics['virtualDisk.write'].each { |sample|
+                    write_kbpersec += sample
+                }
+            end
+
+            write_iops = 0
+            if metrics['virtualDisk.numberWriteAveraged']
+                metrics['virtualDisk.numberWriteAveraged'].each { |sample|
+                    write_iops += sample
+                }
+            end
+
+            # Accumulate the new deltas on top of the totals previously
+            # reported to OpenNebula (MONITORING/* attributes), if any
+            previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
+            previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
+            previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
+            previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
+            previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
+            previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0
+
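+            # Each sample averages KB/s over a refresh_rate-second window,
+            # so sample * 1024 * refresh_rate approximates the bytes moved
+            # in that window (e.g. 9 samples of 50 KB/s at refresh_rate=20
+            # account for 450 * 1024 * 20 = 9,216,000 bytes). The averaged
+            # IOPS samples are accumulated without scaling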
+            @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
+            @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i
+
+            @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
+            @monitor[:diskwriops]  = previous_diskwriops + write_iops
+            @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
+            @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
         end
     end
 
     #  Generates an OpenNebula IM Driver valid string with the monitor info
     def info
         return 'STATE=d' if @state == 'd'
 
-        guest_ip = self["guest.ipAddress"]
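+        # @vm_info caches the VM properties gathered earlier in the
+        # monitoring cycle, replacing per-attribute vCenter round trips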
+        guest_ip = @vm_info["guest.ipAddress"]
 
         used_cpu    = @monitor[:used_cpu]
         used_memory = @monitor[:used_memory]
@@ -2137,11 +2433,13 @@ class VirtualMachine
         diskrdiops  = @monitor[:diskrdiops]
         diskwriops  = @monitor[:diskwriops]
 
-        esx_host      = self["runtime.host.name"].to_s
-        guest_state   = self["guest.guestState"].to_s
-        vmware_tools  = self["guest.toolsRunningStatus"].to_s
-        vmtools_ver   = self["guest.toolsVersion"].to_s
-        vmtools_verst = self["guest.toolsVersionStatus2"].to_s
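+        # the monitored host is now the vCenter cluster, so its name is
+        # reported instead of the individual ESX hostname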
+        esx_host      = @vm_info["cluster_name"].to_s
+        guest_state   = @vm_info["guest.guestState"].to_s
+        vmware_tools  = @vm_info["guest.toolsRunningStatus"].to_s
+        vmtools_ver   = @vm_info["guest.toolsVersion"].to_s
+        vmtools_verst = @vm_info["guest.toolsVersionStatus2"].to_s
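+        # pick this VM's resource pool name out of the cached rp_list;
+        # VMs under the root pool default to the name "Resources"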
+        rp_name       = @vm_info["rp_list"].select { |item| item._ref == self["_ref"]}.first[:name] rescue ""
+        rp_name       = "Resources" if rp_name.empty?
 
         str_info = ""
 
@@ -2151,8 +2449,6 @@ class VirtualMachine
             str_info << "GUEST_IP_ADDRESSES=\"" << @guest_ip_addresses.to_s << "\" "
         end
 
-        str_info << "LAST_MON=" << Time.now.to_i.to_s << " "
-
         str_info << "#{POLL_ATTRIBUTE[:state]}="  << @state               << " "
         str_info << "#{POLL_ATTRIBUTE[:cpu]}="    << used_cpu.to_s        << " "
         str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s     << " "
@@ -2169,7 +2465,7 @@ class VirtualMachine
         str_info << "VCENTER_VMWARETOOLS_RUNNING_STATUS=" << vmware_tools    << " "
         str_info << "VCENTER_VMWARETOOLS_VERSION="        << vmtools_ver     << " "
         str_info << "VCENTER_VMWARETOOLS_VERSION_STATUS=" << vmtools_verst   << " "
-        str_info << "VCENTER_RESOURCE_POOL=\""            << self["resourcePool"].name << "\" "
+        str_info << "VCENTER_RP_NAME=\""                  << rp_name << "\" "
     end
 
     def reset_monitor