1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-01-11 05:17:41 +03:00

Merge branch 'master' into feature-1540-2

This commit is contained in:
Sergio Vega 2019-01-28 13:54:44 -06:00
commit 9de1a9d4a6
40 changed files with 1087 additions and 293 deletions

27
.travis.yml Normal file
View File

@ -0,0 +1,27 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
dist: xenial
language: generic
before_install:
- sudo apt-get install -y libsystemd-dev bash-completion bison debhelper default-jdk flex javahelper libmysql++-dev libsqlite3-dev libssl-dev libws-commons-util-java libxml2-dev libxmlrpc3-client-java libxmlrpc3-common-java libxslt1-dev libcurl4-openssl-dev ruby scons libxmlrpc-c++8-dev npm libvncserver-dev
- gem install rubocop
- sudo npm install -g bower
- sudo npm install -g grunt
- sudo npm install -g grunt-cli
- (cd src/sunstone/public && npm install && bower install)
script:
- set -o errexit; source .travis/smoke_tests.sh

10
.travis/README.md Normal file
View File

@ -0,0 +1,10 @@
## Travis smoke tests
The `.travis/tests` directory contains scripts for each smoke test.
The `smoke_tests.sh` script iterates over each script, logging the output and exiting on the first failure. To add more tests, simply create a new file on `.travis/tests`.
Each test should:
- have a numeric prefix to define the execution order. Renaming is allowed; the rule is to execute the least time-costly tests first
- return 0 on success, other number on error

50
.travis/smoke_tests.sh Executable file
View File

@ -0,0 +1,50 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# Smoke tests for OpenNebula, to be triggered by travis or manually
# It executes all scripts in 'tests' folder and expects 0 exit code
#-------------------------------------------------------------------------------
# default parameters values
LOG_FILE='smoke_tests.results'
# Run a single smoke test script, appending its output to ${LOG_FILE}.
#
# Arguments:
#   $1 - path of the test script to execute
#
# Returns the exit code of the test (0 on success). Note: RC is
# intentionally a global variable — the main driver inspects $RC after
# the test loop finishes.
check_test() {
    local TEST="$1"
    echo "Executing test $TEST" >> "${LOG_FILE}"
    eval "$TEST" >> "${LOG_FILE}" 2>&1
    RC=$?
    echo "RC for $TEST is $RC"
    return $RC
}
# Execute every smoke test in lexical (numeric-prefix) order, stopping at
# the first failure. RC is initialized so an empty tests directory counts
# as success instead of leaving $RC unset.
RC=0
for smoke_test in .travis/tests/*.sh; do
    check_test "$smoke_test" || break
done

# -eq is used instead of the non-POSIX '==' string comparison.
if [ "$RC" -eq 0 ]; then
    echo "All tests OK!"
else
    echo "Test failed: $smoke_test"
    echo "Log follows:"
    cat "$LOG_FILE"
fi

exit $RC

19
.travis/tests/01-rubocop.sh Executable file
View File

@ -0,0 +1,19 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Lint Ruby code: link the shared RuboCop configuration into the
# repository root and run the linter. -f makes the link creation
# idempotent so the test can be re-run in a dirty workspace.
ln -sf share/linters/.rubocop.yml . && rubocop

19
.travis/tests/02-scons.sh Executable file
View File

@ -0,0 +1,19 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Smoke test: verify that the OpenNebula core compiles with the full
# feature set enabled (Sunstone, MySQL, systemd units, new XML-RPC).
scons sunstone=yes mysql=yes systemd=yes new_xmlrpc=yes

View File

@ -103,7 +103,7 @@ class VirtualNetworkTemplateChmod : public RequestManagerChmod
public:
VirtualNetworkTemplateChmod():
RequestManagerChmod("one.vntemplate.chmod", "Changes permission bits of a "
"virtual network template", "A:siiiiiiiiiib")
"virtual network template")
{
Nebula& nd = Nebula::instance();
pool = nd.get_vntpool();

View File

@ -132,7 +132,7 @@ class VNTemplateClone : public RequestManagerClone
public:
VNTemplateClone():
RequestManagerClone("one.vntemplate.clone",
"Clone a virtual network template", "A:sisb")
"Clone a virtual network template", "A:sis")
{
Nebula& nd = Nebula::instance();
pool = nd.get_vntpool();
@ -144,9 +144,9 @@ public:
~VNTemplateClone(){};
ErrorCode request_execute(int source_id, const string &name, int &new_id,
bool recursive, const string& s_uattrs, RequestAttributes& att)
const string& s_uattrs, RequestAttributes& att)
{
return clone(source_id, name, new_id, recursive, s_uattrs, att);
return clone(source_id, name, new_id, false, s_uattrs, att);
};
protected:

View File

@ -121,7 +121,7 @@ class VirtualNetworkTemplateDelete : public RequestManagerDelete
public:
VirtualNetworkTemplateDelete():
RequestManagerDelete("one.vntemplate.delete",
"A:sib",
"A:si",
"Deletes a virtual network template")
{
Nebula& nd = Nebula::instance();
@ -133,7 +133,7 @@ public:
ErrorCode request_execute(int oid, bool recursive, RequestAttributes& att)
{
return delete_object(oid, recursive, att, auth_op);
return delete_object(oid, false, att, auth_op);
}
};

View File

@ -1,11 +1,615 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#
# Configuration file for rubocop linter
# Applies to every ruby file on the OpenNebula repository
#
AllCops:
Include:
- 'src/cli'
Exclude:
- src/sunstone/public/node_modules/**/*
- src/tm_mad
- share/onegate/onegate
- share/scons/get_xmlrpc_config
- share/rubygems/generate
- share/hooks/raft/follower_cleanup
- share/esx-fw-vnc/Vagrantfile
- share/vendor/ruby/gems/rbvmomi/Rakefile
- share/vendor/ruby/gems/packethost/Rakefile
- share/vendor/ruby/gems/packethost/Gemfile
- share/vendor/ruby/gems/packethost/packethost.gemspec
- share/install_gems/Gemfile
- share/install_gems/install_gems
- src/authm_mad/remotes/dummy/authenticate
- src/authm_mad/remotes/plain/authenticate
- src/authm_mad/remotes/ldap/authenticate
- src/authm_mad/remotes/server_x509/authenticate
- src/authm_mad/remotes/server_cipher/authenticate
- src/authm_mad/remotes/x509/authenticate
- src/authm_mad/remotes/ssh/authenticate
- src/sunstone/bin/novnc-server
- src/sunstone/config.ru
- src/pm_mad/remotes/dummy/cancel
- src/pm_mad/remotes/dummy/shutdown
- src/pm_mad/remotes/dummy/reboot
- src/pm_mad/remotes/dummy/deploy
- src/pm_mad/remotes/dummy/reset
- src/pm_mad/remotes/dummy/poll
- src/pm_mad/remotes/packet/cancel
- src/pm_mad/remotes/packet/shutdown
- src/pm_mad/remotes/packet/reboot
- src/pm_mad/remotes/packet/deploy
- src/pm_mad/remotes/packet/reset
- src/pm_mad/remotes/packet/poll
- src/pm_mad/remotes/ec2/cancel
- src/pm_mad/remotes/ec2/shutdown
- src/pm_mad/remotes/ec2/reboot
- src/pm_mad/remotes/ec2/deploy
- src/pm_mad/remotes/ec2/reset
- src/pm_mad/remotes/ec2/poll
- src/onegate/config.ru
- src/datastore_mad/remotes/vcenter/monitor
- src/datastore_mad/remotes/vcenter/mkfs
- src/datastore_mad/remotes/vcenter/stat
- src/datastore_mad/remotes/vcenter/clone
- src/datastore_mad/remotes/vcenter/cp
- src/datastore_mad/remotes/vcenter/export
- src/datastore_mad/remotes/vcenter/rm
- src/im_mad/remotes/lxd-probes.d/pci.rb
- src/im_mad/remotes/kvm-probes.d/pci.rb
- src/im_mad/remotes/kvm.d/collectd-client.rb
- src/im_mad/remotes/lxd.d/collectd-client.rb
- src/im_mad/remotes/vcenter.d/poll
- src/im_mad/remotes/packet.d/poll
- src/im_mad/remotes/ec2.d/poll
- src/im_mad/remotes/one.d/poll
- src/im_mad/remotes/az.d/poll
- src/vnm_mad/remotes/ovswitch/post
- src/vnm_mad/remotes/ovswitch/clean
- src/vnm_mad/remotes/ovswitch/pre
- src/vnm_mad/remotes/802.1Q/post
- src/vnm_mad/remotes/802.1Q/update_sg
- src/vnm_mad/remotes/802.1Q/clean
- src/vnm_mad/remotes/802.1Q/pre
- src/vnm_mad/remotes/ebtables/post
- src/vnm_mad/remotes/ebtables/update_sg
- src/vnm_mad/remotes/ebtables/clean
- src/vnm_mad/remotes/ebtables/pre
- src/vnm_mad/remotes/bridge/clean
- src/vnm_mad/remotes/bridge/pre
- src/vnm_mad/remotes/vxlan/post
- src/vnm_mad/remotes/vxlan/update_sg
- src/vnm_mad/remotes/vxlan/clean
- src/vnm_mad/remotes/vxlan/pre
- src/vnm_mad/remotes/fw/post
- src/vnm_mad/remotes/fw/update_sg
- src/vnm_mad/remotes/fw/clean
- src/vnm_mad/remotes/fw/pre
- src/vnm_mad/remotes/ovswitch_vxlan/post
- src/vnm_mad/remotes/ovswitch_vxlan/clean
- src/vnm_mad/remotes/ovswitch_vxlan/pre
- src/vmm_mad/remotes/vcenter/attach_nic
- src/vmm_mad/remotes/vcenter/cancel
- src/vmm_mad/remotes/vcenter/snapshot_revert
- src/vmm_mad/remotes/vcenter/detach_nic
- src/vmm_mad/remotes/vcenter/snapshot_delete
- src/vmm_mad/remotes/vcenter/detach_disk
- src/vmm_mad/remotes/vcenter/shutdown
- src/vmm_mad/remotes/vcenter/attach_disk
- src/vmm_mad/remotes/vcenter/reboot
- src/vmm_mad/remotes/vcenter/deploy
- src/vmm_mad/remotes/vcenter/reset
- src/vmm_mad/remotes/vcenter/migrate
- src/vmm_mad/remotes/vcenter/reconfigure
- src/vmm_mad/remotes/vcenter/save
- src/vmm_mad/remotes/vcenter/restore
- src/vmm_mad/remotes/vcenter/snapshot_create
- src/vmm_mad/remotes/vcenter/poll
- src/vmm_mad/remotes/lxd/attach_nic
- src/vmm_mad/remotes/lxd/detach_nic
- src/vmm_mad/remotes/lxd/detach_disk
- src/vmm_mad/remotes/lxd/shutdown
- src/vmm_mad/remotes/lxd/attach_disk
- src/vmm_mad/remotes/lxd/reboot
- src/vmm_mad/remotes/lxd/deploy
- src/vmm_mad/remotes/lxd/prereconfigure
- src/vmm_mad/remotes/lxd/reconfigure
- src/vmm_mad/remotes/lxd/poll
- src/vmm_mad/remotes/one/cancel
- src/vmm_mad/remotes/one/shutdown
- src/vmm_mad/remotes/one/reboot
- src/vmm_mad/remotes/one/deploy
- src/vmm_mad/remotes/one/reset
- src/vmm_mad/remotes/one/save
- src/vmm_mad/remotes/one/restore
- src/vmm_mad/remotes/one/poll
- src/vmm_mad/remotes/kvm/poll
- src/vmm_mad/remotes/az/cancel
- src/vmm_mad/remotes/az/shutdown
- src/vmm_mad/remotes/az/reboot
- src/vmm_mad/remotes/az/deploy
- src/vmm_mad/remotes/az/save
- src/vmm_mad/remotes/az/restore
- src/vmm_mad/remotes/az/poll
- src/vmm_mad/remotes/packet/cancel
- src/vmm_mad/remotes/packet/shutdown
- src/vmm_mad/remotes/packet/reboot
- src/vmm_mad/remotes/packet/deploy
- src/vmm_mad/remotes/packet/reset
- src/vmm_mad/remotes/packet/poll
- src/vmm_mad/remotes/ec2/cancel
- src/vmm_mad/remotes/ec2/shutdown
- src/vmm_mad/remotes/ec2/reboot
- src/vmm_mad/remotes/ec2/deploy
- src/vmm_mad/remotes/ec2/save
- src/vmm_mad/remotes/ec2/restore
- src/vmm_mad/remotes/ec2/poll
- src/cloud/ec2/bin/econe-detach-volume
- src/cloud/ec2/bin/econe-start-instances
- src/cloud/ec2/bin/econe-associate-address
- src/cloud/ec2/bin/econe-create-volume
- src/cloud/ec2/bin/econe-delete-volume
- src/cloud/ec2/bin/econe-attach-volume
- src/cloud/ec2/bin/econe-stop-instances
- src/cloud/ec2/bin/econe-delete-keypair
- src/cloud/ec2/bin/econe-register
- src/cloud/ec2/bin/econe-release-address
- src/cloud/ec2/bin/econe-describe-images
- src/cloud/ec2/bin/econe-terminate-instances
- src/cloud/ec2/bin/econe-describe-keypairs
- src/cloud/ec2/bin/econe-describe-instances
- src/cloud/ec2/bin/econe-reboot-instances
- src/cloud/ec2/bin/econe-allocate-address
- src/cloud/ec2/bin/econe-upload
- src/cloud/ec2/bin/econe-describe-addresses
- src/cloud/ec2/bin/econe-run-instances
- src/cloud/ec2/bin/econe-disassociate-address
- src/cloud/ec2/bin/econe-create-keypair
- src/cloud/ec2/bin/econe-describe-volumes
- src/onedb/onedb
- src/market_mad/remotes/s3/monitor
- src/market_mad/remotes/s3/delete
- src/market_mad/remotes/s3/import
- src/market_mad/remotes/linuxcontainers/monitor
- src/market_mad/remotes/one/monitor
- src/tm_mad/vcenter/monitor
- src/tm_mad/vcenter/delete
- src/tm_mad/vcenter/mvds
- src/tm_mad/vcenter/mkimage
- src/tm_mad/vcenter/cpds
- src/tm_mad/vcenter/clone
- src/tm_mad/vcenter/mv
- src/tm_mad/vcenter/resize
- src/flow/config.ru
- src/flow/Gemfile
- src/cli/oneprovision
- share/scons/po2json.rb
- share/sudoers/sudo_commands.rb
- share/hooks/vcenter/delete_vcenter_net.rb
- share/hooks/vcenter/create_vcenter_net.rb
- share/hooks/ft/host_error.rb
- share/instance_types/ec2-instance-types.rb
- share/instance_types/az-instance-types.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datastore.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/HostSystem.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedObject.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ServiceInstance.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PropertyCollector.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectUpdate.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedEntity.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Folder.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ResourcePool.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/OvfManager.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datacenter.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectContent.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/VirtualMachine.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ComputeResource.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerfCounterInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerformanceManager.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Task.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/sms.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/fault.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trivial_soap.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/sms/SmsStorageManager.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/deserialization.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/connection.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/basic_types.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/perfdump.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/admission_control.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/deploy.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/leases.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trollop.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/version.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/pbm.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/type_loader.rb
- share/vendor/ruby/gems/packethost/spec/spec_helper.rb
- share/vendor/ruby/gems/packethost/spec/support/webmock.rb
- share/vendor/ruby/gems/packethost/spec/support/fake_packet.rb
- share/vendor/ruby/gems/packethost/spec/support/shared/entity.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/client_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/project_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/configuration_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/device_spec.rb
- share/vendor/ruby/gems/packethost/lib/packet.rb
- share/vendor/ruby/gems/packethost/lib/packet/project.rb
- share/vendor/ruby/gems/packethost/lib/packet/errors.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/base.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/serialization.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/associations.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/finders.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/timestamps.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/persistence.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity.rb
- share/vendor/ruby/gems/packethost/lib/packet/facility.rb
- share/vendor/ruby/gems/packethost/lib/packet/operating_system.rb
- share/vendor/ruby/gems/packethost/lib/packet/device.rb
- share/vendor/ruby/gems/packethost/lib/packet/ip_range.rb
- share/vendor/ruby/gems/packethost/lib/packet/client.rb
- share/vendor/ruby/gems/packethost/lib/packet/version.rb
- share/vendor/ruby/gems/packethost/lib/packet/global_id.rb
- share/vendor/ruby/gems/packethost/lib/packet/configuration.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/devices.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/projects.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/plans.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/facilities.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/ip_ranges.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/ssh_keys.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/users.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/operating_systems.rb
- share/vendor/ruby/gems/packethost/lib/packet/ssh_key.rb
- share/vendor/ruby/gems/packethost/lib/packet/plan.rb
- share/vendor/ruby/gems/packethost/lib/packet/user.rb
- share/vendor/ruby/gems/packethost/lib/packethost.rb
- share/router/vmcontext.rb
- src/authm_mad/one_auth_mad.rb
- src/authm_mad/remotes/ldap/test/ldap_auth_spec.rb
- src/authm_mad/remotes/ldap/ldap_auth.rb
- src/authm_mad/remotes/server_x509/server_x509_auth.rb
- src/authm_mad/remotes/server_cipher/server_cipher_auth.rb
- src/authm_mad/remotes/x509/x509_auth.rb
- src/authm_mad/remotes/ssh/ssh_auth.rb
- src/sunstone/sunstone-server.rb
- src/sunstone/test/spec/spec_helper.rb
- src/sunstone/test/spec/vnet_spec.rb
- src/sunstone/test/spec/image_spec.rb
- src/sunstone/test/spec/vm_spec.rb
- src/sunstone/test/spec/host_spec.rb
- src/sunstone/test/spec/user_spec.rb
- src/sunstone/OpenNebulaAddons.rb
- src/sunstone/OpenNebulaVNC.rb
- src/sunstone/models/OpenNebulaJSON.rb
- src/sunstone/models/SunstoneViews.rb
- src/sunstone/models/SunstoneServer.rb
- src/sunstone/models/OpenNebulaJSON/SecurityGroupJSON.rb
- src/sunstone/models/OpenNebulaJSON/HostJSON.rb
- src/sunstone/models/OpenNebulaJSON/PoolJSON.rb
- src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb
- src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb
- src/sunstone/models/OpenNebulaJSON/UserJSON.rb
- src/sunstone/models/OpenNebulaJSON/AclJSON.rb
- src/sunstone/models/OpenNebulaJSON/JSONUtils.rb
- src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb
- src/sunstone/models/OpenNebulaJSON/MarketPlaceAppJSON.rb
- src/sunstone/models/OpenNebulaJSON/ImageJSON.rb
- src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb
- src/sunstone/models/OpenNebulaJSON/MarketPlaceJSON.rb
- src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb
- src/sunstone/models/OpenNebulaJSON/VirtualRouterJSON.rb
- src/sunstone/models/OpenNebulaJSON/VirtualNetworkTemplateJSON.rb
- src/sunstone/models/OpenNebulaJSON/GroupJSON.rb
- src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb
- src/sunstone/models/OpenNebulaJSON/VMGroupJSON.rb
- src/sunstone/models/OpenNebulaJSON/VdcJSON.rb
- src/sunstone/public/config.rb
- src/sunstone/routes/oneflow.rb
- src/sunstone/routes/vcenter.rb
- src/sunstone/routes/support.rb
- src/onegate/onegate-server.rb
- src/datastore_mad/remotes/vcenter_downloader.rb
- src/datastore_mad/remotes/vcenter_uploader.rb
- src/datastore_mad/remotes/xpath.rb
- src/datastore_mad/remotes/url.rb
- src/datastore_mad/one_datastore.rb
- src/im_mad/dummy/one_im_dummy.rb
- src/im_mad/im_exec/one_im_exec.rb
- src/im_mad/remotes/lxd-probes.d/lxd.rb
- src/im_mad/remotes/kvm-probes.d/kvm.rb
- src/im_mad/remotes/kvm-probes.d/machines-models.rb
- src/im_mad/remotes/common.d/collectd-client.rb
- src/im_mad/remotes/node-probes.d/pci.rb
- src/hm_mad/one_hm.rb
- src/vnm_mad/one_vnm.rb
- src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb
- src/vnm_mad/remotes/802.1Q/vlan_tag_driver.rb
- src/vnm_mad/remotes/ebtables/Ebtables.rb
- src/vnm_mad/remotes/vxlan/vxlan.rb
- src/vnm_mad/remotes/vxlan/vxlan_driver.rb
- src/vnm_mad/remotes/ovswitch_vxlan/OpenvSwitchVXLAN.rb
- src/vnm_mad/remotes/lib/no_vlan.rb
- src/vnm_mad/remotes/lib/nic.rb
- src/vnm_mad/remotes/lib/vnmmad.rb
- src/vnm_mad/remotes/lib/security_groups.rb
- src/vnm_mad/remotes/lib/security_groups_iptables.rb
- src/vnm_mad/remotes/lib/vm.rb
- src/vnm_mad/remotes/lib/vnm_driver.rb
- src/vnm_mad/remotes/lib/sg_driver.rb
- src/vnm_mad/remotes/lib/command.rb
- src/vnm_mad/remotes/lib/vlan.rb
- src/vnm_mad/remotes/lib/address.rb
- src/cli/command_parser.rb
- src/cli/cli_helper.rb
- src/cli/one_helper.rb
- src/cli/one_helper/onehost_helper.rb
- src/cli/one_helper/onevmgroup_helper.rb
- src/cli/one_helper/onemarket_helper.rb
- src/cli/one_helper/onesecgroup_helper.rb
- src/cli/one_helper/onezone_helper.rb
- src/cli/one_helper/onetemplate_helper.rb
- src/cli/one_helper/onevm_helper.rb
- src/cli/one_helper/oneacct_helper.rb
- src/cli/one_helper/onequota_helper.rb
- src/cli/one_helper/oneuser_helper.rb
- src/cli/one_helper/oneimage_helper.rb
- src/cli/one_helper/onemarketapp_helper.rb
- src/cli/one_helper/onegroup_helper.rb
- src/cli/one_helper/onevnet_helper.rb
- src/cli/one_helper/oneacl_helper.rb
- src/cli/one_helper/onevcenter_helper.rb
- src/cli/one_helper/onecluster_helper.rb
- src/cli/one_helper/onevntemplate_helper.rb
- src/cli/one_helper/onevrouter_helper.rb
- src/cli/one_helper/oneprovision_helpers/host_helper.rb
- src/cli/one_helper/oneprovision_helpers/provision_helper.rb
- src/cli/one_helper/oneprovision_helpers/cluster_helper.rb
- src/cli/one_helper/oneprovision_helpers/common_helper.rb
- src/cli/one_helper/oneprovision_helpers/datastore_helper.rb
- src/cli/one_helper/oneprovision_helpers/vnet_helper.rb
- src/cli/one_helper/oneprovision_helpers/ansible_helper.rb
- src/cli/one_helper/onevdc_helper.rb
- src/cli/one_helper/onedatastore_helper.rb
- src/oca/ruby/test/VirtualMachine_spec.rb
- src/oca/ruby/test/VirtualMachinePool_spec.rb
- src/oca/ruby/test/XMLUtils_spec.rb
- src/oca/ruby/test/UserPool_spec.rb
- src/oca/ruby/test/Host_spec.rb
- src/oca/ruby/test/User_spec.rb
- src/oca/ruby/test/helpers/MockClient.rb
- src/oca/ruby/test/VirtualNetwork_spec.rb
- src/oca/ruby/test/HostPool_spec.rb
- src/oca/ruby/test/VirtualNetworkPool_spec.rb
- src/oca/ruby/opennebula.rb
- src/oca/ruby/opennebula/image.rb
- src/oca/ruby/opennebula/datastore.rb
- src/oca/ruby/opennebula/group_pool.rb
- src/oca/ruby/opennebula/template_pool.rb
- src/oca/ruby/opennebula/marketplaceapp_pool.rb
- src/oca/ruby/opennebula/acl_pool.rb
- src/oca/ruby/opennebula/virtual_machine_pool.rb
- src/oca/ruby/opennebula/pool.rb
- src/oca/ruby/opennebula/host_pool.rb
- src/oca/ruby/opennebula/security_group.rb
- src/oca/ruby/opennebula/cluster_pool.rb
- src/oca/ruby/opennebula/document.rb
- src/oca/ruby/opennebula/zone.rb
- src/oca/ruby/opennebula/virtual_router_pool.rb
- src/oca/ruby/opennebula/user_pool.rb
- src/oca/ruby/opennebula/xml_utils.rb
- src/oca/ruby/opennebula/virtual_router.rb
- src/oca/ruby/opennebula/document_json.rb
- src/oca/ruby/opennebula/marketplace.rb
- src/oca/ruby/opennebula/virtual_machine.rb
- src/oca/ruby/opennebula/xml_element.rb
- src/oca/ruby/opennebula/template.rb
- src/oca/ruby/opennebula/group.rb
- src/oca/ruby/opennebula/virtual_network.rb
- src/oca/ruby/opennebula/security_group_pool.rb
- src/oca/ruby/opennebula/pool_element.rb
- src/oca/ruby/opennebula/document_pool.rb
- src/oca/ruby/opennebula/vm_group_pool.rb
- src/oca/ruby/opennebula/vntemplate_pool.rb
- src/oca/ruby/opennebula/vdc_pool.rb
- src/oca/ruby/opennebula/datastore_pool.rb
- src/oca/ruby/opennebula/cluster.rb
- src/oca/ruby/opennebula/utils.rb
- src/oca/ruby/opennebula/acl.rb
- src/oca/ruby/opennebula/vntemplate.rb
- src/oca/ruby/opennebula/oneflow_client.rb
- src/oca/ruby/opennebula/host.rb
- src/oca/ruby/opennebula/vm_group.rb
- src/oca/ruby/opennebula/xml_pool.rb
- src/oca/ruby/opennebula/client.rb
- src/oca/ruby/opennebula/document_pool_json.rb
- src/oca/ruby/opennebula/zone_pool.rb
- src/oca/ruby/opennebula/error.rb
- src/oca/ruby/opennebula/image_pool.rb
- src/oca/ruby/opennebula/virtual_network_pool.rb
- src/oca/ruby/opennebula/system.rb
- src/oca/ruby/opennebula/marketplaceapp.rb
- src/oca/ruby/opennebula/marketplace_pool.rb
- src/oca/ruby/opennebula/vdc.rb
- src/oca/ruby/opennebula/user.rb
- src/oca/ruby/deprecated/OpenNebula.rb
- src/vmm_mad/dummy/one_vmm_dummy.rb
- src/vmm_mad/remotes/vcenter/vcenter_driver.rb
- src/vmm_mad/remotes/one/opennebula_driver.rb
- src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb
- src/vmm_mad/remotes/lib/vcenter_driver/network.rb
- src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
- src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb
- src/vmm_mad/remotes/lib/vcenter_driver/host.rb
- src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb
- src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb
- src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb
- src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb
- src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb
- src/vmm_mad/remotes/lib/lxd/mapper/raw.rb
- src/vmm_mad/remotes/lib/lxd/container.rb
- src/vmm_mad/remotes/lib/lxd/client.rb
- src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb
- src/vmm_mad/remotes/lib/lxd/command.rb
- src/vmm_mad/remotes/lib/poll_common.rb
- src/vmm_mad/remotes/az/az_driver.rb
- src/vmm_mad/remotes/packet/packet_driver.rb
- src/vmm_mad/remotes/ec2/ec2_driver.rb
- src/vmm_mad/exec/one_vmm_exec.rb
- src/mad/ruby/ssh_stream.rb
- src/mad/ruby/test/MonkeyPatcher.rb
- src/mad/ruby/test/OpenNebulaDriver_spec.rb
- src/mad/ruby/VirtualMachineDriver.rb
- src/mad/ruby/ActionManager.rb
- src/mad/ruby/DriverExecHelper.rb
- src/mad/ruby/CommandManager.rb
- src/mad/ruby/test_mad.rb
- src/mad/ruby/OpenNebulaDriver.rb
- src/mad/ruby/scripts_common.rb
- src/ipamm_mad/one_ipam.rb
- src/cloud/common/CloudClient.rb
- src/cloud/common/CloudServer.rb
- src/cloud/common/CloudAuth.rb
- src/cloud/common/CloudAuth/RemoteCloudAuth.rb
- src/cloud/common/CloudAuth/X509CloudAuth.rb
- src/cloud/common/CloudAuth/EC2CloudAuth.rb
- src/cloud/common/CloudAuth/SunstoneCloudAuth.rb
- src/cloud/common/CloudAuth/OneGateCloudAuth.rb
- src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb
- src/cloud/ec2/lib/ebs.rb
- src/cloud/ec2/lib/elastic_ip.rb
- src/cloud/ec2/lib/ImageEC2.rb
- src/cloud/ec2/lib/net_ssh_replacement.rb
- src/cloud/ec2/lib/keypair.rb
- src/cloud/ec2/lib/EC2QueryServer.rb
- src/cloud/ec2/lib/econe-server.rb
- src/cloud/ec2/lib/EC2QueryClient.rb
- src/cloud/ec2/lib/tags.rb
- src/cloud/ec2/lib/instance.rb
- src/cloud/ec2/lib/econe_application.rb
- src/onedb/database_schema.rb
- src/onedb/fsck/image.rb
- src/onedb/fsck/datastore.rb
- src/onedb/fsck/quotas.rb
- src/onedb/fsck/history.rb
- src/onedb/fsck/vrouter.rb
- src/onedb/fsck/pool_control.rb
- src/onedb/fsck/marketplace.rb
- src/onedb/fsck/network.rb
- src/onedb/fsck/template.rb
- src/onedb/fsck/group.rb
- src/onedb/fsck/cluster.rb
- src/onedb/fsck/cluster_vnc_bitmap.rb
- src/onedb/fsck/host.rb
- src/onedb/fsck/vm.rb
- src/onedb/fsck/marketplaceapp.rb
- src/onedb/fsck/user.rb
- src/onedb/fsck.rb
- src/onedb/patches/ip4_ip6_static.rb
- src/onedb/patches/history_times.rb
- src/onedb/patches/marketapps_clean.rb
- src/onedb/patches/4.14_monitoring.rb
- src/onedb/patches/next_snapshot.rb
- src/onedb/vcenter_one54_pre.rb
- src/onedb/vcenter_one54.rb
- src/onedb/onedb.rb
- src/onedb/shared/4.2.0_to_4.3.80.rb
- src/onedb/shared/5.3.80_to_5.4.0.rb
- src/onedb/shared/3.0.0_to_3.1.0.rb
- src/onedb/shared/3.2.0_to_3.2.1.rb
- src/onedb/shared/3.6.0_to_3.7.80.rb
- src/onedb/shared/3.5.80_to_3.6.0.rb
- src/onedb/shared/5.6.0_to_5.7.80.rb
- src/onedb/shared/4.4.1_to_4.5.80.rb
- src/onedb/shared/3.1.0_to_3.1.80.rb
- src/onedb/shared/3.8.0_to_3.8.1.rb
- src/onedb/shared/2.0_to_2.9.80.rb
- src/onedb/shared/3.8.5_to_3.9.80.rb
- src/onedb/shared/4.5.80_to_4.6.0.rb
- src/onedb/shared/3.8.2_to_3.8.3.rb
- src/onedb/shared/4.0.1_to_4.1.80.rb
- src/onedb/shared/4.11.80_to_4.90.0.rb
- src/onedb/shared/3.9.80_to_3.9.90.rb
- src/onedb/shared/4.90.0_to_5.2.0.rb
- src/onedb/shared/4.6.0_to_4.11.80.rb
- src/onedb/shared/3.3.0_to_3.3.80.rb
- src/onedb/shared/3.4.1_to_3.5.80.rb
- src/onedb/shared/5.4.1_to_5.5.80.rb
- src/onedb/shared/4.3.85_to_4.3.90.rb
- src/onedb/shared/4.4.0_to_4.4.1.rb
- src/onedb/shared/3.8.1_to_3.8.2.rb
- src/onedb/shared/2.9.90_to_3.0.0.rb
- src/onedb/shared/3.8.3_to_3.8.4.rb
- src/onedb/shared/2.9.80_to_2.9.85.rb
- src/onedb/shared/5.2.0_to_5.3.80.rb
- src/onedb/shared/3.4.0_to_3.4.1.rb
- src/onedb/shared/5.4.0_to_5.4.1.rb
- src/onedb/shared/4.3.80_to_4.3.85.rb
- src/onedb/shared/3.2.1_to_3.3.0.rb
- src/onedb/shared/2.9.85_to_2.9.90.rb
- src/onedb/shared/3.8.4_to_3.8.5.rb
- src/onedb/shared/3.7.80_to_3.8.0.rb
- src/onedb/shared/3.1.80_to_3.2.0.rb
- src/onedb/shared/5.5.80_to_5.6.0.rb
- src/onedb/shared/3.3.80_to_3.4.0.rb
- src/onedb/shared/4.3.90_to_4.4.0.rb
- src/onedb/shared/3.9.90_to_4.0.0.rb
- src/onedb/shared/4.0.0_to_4.0.1.rb
- src/onedb/shared/4.1.80_to_4.2.0.rb
- src/onedb/local/5.3.80_to_5.4.0.rb
- src/onedb/local/4.13.80_to_4.13.85.rb
- src/onedb/local/5.6.0_to_5.7.80.rb
- src/onedb/local/5.4.1_to_5.5.80.rb
- src/onedb/local/4.13.85_to_4.90.0.rb
- src/onedb/local/5.4.0_to_5.4.1.rb
- src/onedb/local/4.11.80_to_4.13.80.rb
- src/onedb/local/4.10.3_to_4.11.80.rb
- src/onedb/local/4.90.0_to_5.3.80.rb
- src/onedb/local/4.5.80_to_4.7.80.rb
- src/onedb/local/4.9.80_to_4.10.3.rb
- src/onedb/local/5.5.80_to_5.6.0.rb
- src/onedb/local/4.7.80_to_4.9.80.rb
- src/onedb/onedb_live.rb
- src/onedb/onedb_backend.rb
- src/onedb/sqlite2mysql.rb
- src/market_mad/remotes/s3/S3.rb
- src/market_mad/one_market.rb
- src/tm_mad/one_tm.rb
- src/flow/lib/models/service_template_pool.rb
- src/flow/lib/models/service_pool.rb
- src/flow/lib/models/service_template.rb
- src/flow/lib/models/role.rb
- src/flow/lib/models/service.rb
- src/flow/lib/strategy.rb
- src/flow/lib/grammar.rb
- src/flow/lib/LifeCycleManager.rb
- src/flow/lib/log.rb
- src/flow/lib/models.rb
- src/flow/lib/validator.rb
- src/flow/lib/strategy/straight.rb
- src/flow/oneflow-server.rb
########
# LAYOUT
@ -169,6 +773,12 @@ Metrics/BlockLength:
Metrics/LineLength:
Max: 80
Metrics/ModuleLength:
Enabled: False
Metrics/ClassLength:
Enabled: False
# Parameter list config:
Metrics/ParameterLists:
Max: 5

View File

@ -1,18 +0,0 @@
AllCops:
Include:
- '**/Rakefile'
- lib/**/*
Documentation:
Enabled: false
LineLength:
Enabled: false
Style/PredicateName:
NamePrefix:
- is_
- have_
Style/MethodMissing:
Enabled: false

View File

@ -303,7 +303,7 @@ module CLIHelper
begin
if options[:csv]
puts CSV.generate_line(@default_columns)
puts CSV.generate_line(@default_columns) if !options[:noheader]
res_data.each {|l| puts CSV.generate_line(l) }
else
res_data.each{|l|

View File

@ -516,11 +516,13 @@ EOT
# List pool functions
#-----------------------------------------------------------------------
def start_pager
pager = ENV['ONE_PAGER'] || 'less'
pager = ENV['ONE_PAGER'] || 'more'
# Start pager, defaults to more
p_r, p_w = IO.pipe
Signal.trap('PIPE', 'SIG_IGN')
lpid = fork do
$stdin.reopen(p_r)
@ -531,7 +533,7 @@ EOT
exec([pager, pager])
end
# Send listing to pager pipe
$stdout.close
$stdout = p_w.dup
@ -547,6 +549,9 @@ EOT
begin
Process.wait(lpid)
rescue Interrupt
Process.kill("TERM", lpid)
Process.wait(lpid)
rescue Errno::ECHILD
end
end
@ -558,12 +563,11 @@ EOT
elements = 0
page = ""
pool.each {|e|
elements += 1
pool.each {|e|
elements += 1
page << e.to_xml(true) << "\n"
}
else
pname = pool.pool_name
ename = pool.element_name
@ -585,8 +589,8 @@ EOT
# output
#-----------------------------------------------------------------------
def list_pool_table(table, pool, options, filter_flag)
if $stdout.isatty and (!options.key?:no_pager)
size = $stdout.winsize[0] - 1
if $stdout.isatty and (!options.key?:no_pager)
size = $stdout.winsize[0] - 1
# ----------- First page, check if pager is needed -------------
rc = pool.get_page(size, 0)
@ -662,8 +666,8 @@ EOT
# List pool in XML format, pagination is used in interactive output
#-----------------------------------------------------------------------
def list_pool_xml(pool, options, filter_flag)
if $stdout.isatty
size = $stdout.winsize[0] - 1
if $stdout.isatty
size = $stdout.winsize[0] - 1
# ----------- First page, check if pager is needed -------------
rc = pool.get_page(size, 0)

View File

@ -16,28 +16,28 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"]
ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby"
REMOTES_LOCATION="/var/lib/one/remotes"
ANSIBLE_LOCATION="/usr/share/one/oneprovision/ansible"
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
REMOTES_LOCATION = '/var/lib/one/remotes'
ANSIBLE_LOCATION = '/usr/share/one/oneprovision/ansible'
else
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby"
REMOTES_LOCATION=ONE_LOCATION+"/var/remotes"
ANSIBLE_LOCATION=ONE_LOCATION+"/usr/share/oneprovision/ansible"
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
REMOTES_LOCATION = ONE_LOCATION + '/var/remotes'
ANSIBLE_LOCATION = ONE_LOCATION + '/usr/share/oneprovision/ansible'
end
$: << RUBY_LIB_LOCATION
$: << RUBY_LIB_LOCATION+"/cli"
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << RUBY_LIB_LOCATION + '/cli'
$PING_TIMEOUT = 20
$PING_RETRIES = 10
$MAX_RETRIES = 3
$RUN_MODE = :interactive
$FAIL_CHOICE = :quit
$CLEANUP = false
$THREADS = 3
PING_TIMEOUT_DEFAULT = 20
PING_RETRIES_DEFAULT = 10
MAX_RETRIES_DEFAULT = 3
RUN_MODE_DEFAULT = :interactive
FAIL_CHOICE_DEFAULT = :quit
CLEANUP_DEFAULT = false
THREADS_DEFAULT = 3
require 'command_parser'
require 'one_helper'
@ -52,27 +52,27 @@ require 'tempfile'
require 'base64'
CommandParser::CmdParser.new(ARGV) do
usage "`oneprovision` <command> [<file>] [<hostid>] [<args>] [<options>]"
usage '`oneprovision` <command> [<file>] [<hostid>] [<args>] [<options>]'
version OpenNebulaHelper::ONE_VERSION
$common_helper = CommonHelper.new
$host_helper = OneProvisionHostHelper.new
$ansible_helper = OneProvisionAnsibleHelper.new
$provision_helper = OneProvisionProvisionHelper.new
$cluster_helper = OneProvisionClusterHelper.new
$datastore_helper = OneProvisionDatastoreHelper.new
$vnet_helper = OneProvisionVnetHelper.new
@common_helper = CommonHelper.new
@host_helper = OneProvisionHostHelper.new
@ansible_helper = OneProvisionAnsibleHelper.new
@provision_helper = OneProvisionProvisionHelper.new
@cluster_helper = OneProvisionClusterHelper.new
@datastore_helper = OneProvisionDatastoreHelper.new
@vnet_helper = OneProvisionVnetHelper.new
$mutex = Mutex.new
@mutex = Mutex.new
before_proc do
$common_helper.set_client(options)
$host_helper.set_client(options)
$ansible_helper.set_client(options)
$provision_helper.set_client(options)
$cluster_helper.set_client(options)
$datastore_helper.set_client(options)
$vnet_helper.set_client(options)
@common_helper.set_client(options)
@host_helper.set_client(options)
@ansible_helper.set_client(options)
@provision_helper.set_client(options)
@cluster_helper.set_client(options)
@datastore_helper.set_client(options)
@vnet_helper.set_client(options)
end
########################################################################
@ -80,125 +80,130 @@ CommandParser::CmdParser.new(ARGV) do
########################################################################
VERBOSE = {
:name => "verbose",
:short => "-d",
:large => "--verbose",
:description => "Set verbose logging mode",
:name => 'verbose',
:short => '-d',
:large => '--verbose',
:description => 'Set verbose logging mode'
}
DEBUG = {
:name => "debug",
:short => "-D",
:large => "--debug" ,
:description => "Set debug logging mode",
:name => 'debug',
:short => '-D',
:large => '--debug',
:description => 'Set debug logging mode',
:format => String
}
BATCH = {
:name => "batch",
:short => "-b",
:large => "--batch",
:description => "Run in non-interactive mode (no questions)",
:format => String,
:name => 'batch',
:short => '-b',
:large => '--batch',
:description => 'Run in non-interactive mode (no questions)',
:format => String
}
FAIL_RETRY = {
:name => "fail_retry",
:large => "--fail-retry number",
:description => "Set batch failover mode to number of retries",
:name => 'fail_retry',
:large => '--fail-retry number',
:description => 'Set batch failover mode to number of retries',
:format => Integer
}
FAIL_CLEANUP = {
:name => "fail_cleanup",
:large => "--fail-cleanup",
:description => "Set batch failover mode to clean up and quit",
:name => 'fail_cleanup',
:large => '--fail-cleanup',
:description => 'Set batch failover mode to clean up and quit'
}
FAIL_SKIP = {
:name => "fail_skip",
:large => "--fail-skip",
:description => "Set batch failover mode to skip failing part",
:name => 'fail_skip',
:large => '--fail-skip',
:description => 'Set batch failover mode to skip failing part'
}
FAIL_QUIT = {
:name => "fail_quit",
:large => "--fail-quit",
:description => "Set batch failover mode to quit (default)",
:name => 'fail_quit',
:large => '--fail-quit',
:description => 'Set batch failover mode to quit (default)'
}
FORCE = {
:name => "force",
:short => "-F",
:large => "--force",
:description => "Force configure to execute",
:name => 'force',
:short => '-F',
:large => '--force',
:description => 'Force configure to execute',
:format => String
}
HARD = {
:name => "hard",
:short => "-H",
:large => "--hard",
:description => "Reset the host",
:name => 'hard',
:short => '-H',
:large => '--hard',
:description => 'Reset the host',
:format => String
}
PING_TIMEOUT = {
:name => "ping_timeout",
:large => "--ping-timeout seconds",
:description => "Set timeout for ping (default: #{$PING_TIMEOUT} secs)",
:name => 'ping_timeout',
:large => '--ping-timeout seconds',
:description => "Set timeout for ping (default: \
#{PING_TIMEOUT_DEFAULT} secs)",
:format => Integer
}
PING_RETRIES = {
:name => "ping_retries",
:large => "--ping-retries number",
:description => "Set retries for ping (default: #{$PING_RETRIES})",
:name => 'ping_retries',
:large => '--ping-retries number',
:description => "Set retries for ping (default: \
#{PING_RETRIES_DEFAULT})",
:format => Integer
}
THREADS = {
:name => "threads",
:short => "-t threads",
:large => "--threads threads",
:description => "Set threads for create (default: #{$THREADS})",
:name => 'threads',
:short => '-t threads',
:large => '--threads threads',
:description => "Set threads for create (default: #{THREADS_DEFAULT})",
:format => Integer
}
DELETE_ALL = {
:name => "delete_all",
:large => "--delete-all",
:description => "Delete all vms and images first, then delete the resources."
:name => 'delete_all',
:large => '--delete-all',
:description => 'Delete all vms and images first, then delete the\
resources.'
}
INCREMENTAL = {
:name => "incremental",
:large => "--incremental",
:description => "Configure just new hosts, default is configure the whole provision."
:name => 'incremental',
:large => '--incremental',
:description => 'Configure just new hosts, default is configure \
the whole provision.'
}
DUMP = {
:name => "dump",
:large => "--dump",
:description => "Dump the configuration file result."
:name => 'dump',
:large => '--dump',
:description => 'Dump the configuration file result.'
}
MODES = CommandParser::OPTIONS - [ CommandParser::VERBOSE ] + [
MODES = CommandParser::OPTIONS - [CommandParser::VERBOSE] + [
VERBOSE, DEBUG, BATCH,
FAIL_RETRY, FAIL_CLEANUP, FAIL_SKIP, FAIL_QUIT ]
FAIL_RETRY, FAIL_CLEANUP, FAIL_SKIP, FAIL_QUIT
]
CREATE_OPTIONS = [ THREADS, MODES, PING_TIMEOUT, PING_RETRIES]
CREATE_OPTIONS = [THREADS, MODES, PING_TIMEOUT, PING_RETRIES]
########################################################################
# Formatters for arguments
########################################################################
set :format, :hostid, CommonHelper.to_id_desc do |arg|
$host_helper.to_id(arg)
@host_helper.to_id(arg)
end
set :format, :hostid_list, CommonHelper.list_to_id_desc do |arg|
$host_helper.list_to_id(arg)
@host_helper.list_to_id(arg)
end
########################################################################
@ -209,10 +214,10 @@ CommandParser::CmdParser.new(ARGV) do
Provision a new cluster via bare metal provider
EOT
command :create, create_desc, :config, :options=>CREATE_OPTIONS do
$common_helper.get_mode(options)
command :create, create_desc, :config, :options => CREATE_OPTIONS do
@common_helper.get_mode(options)
$provision_helper.create(args[0], options)
@provision_helper.create(args[0], options)
0
end
@ -223,8 +228,8 @@ CommandParser::CmdParser.new(ARGV) do
Validate configuration file
EOT
command :validate, validate_desc, [:config_file], :options=>DUMP do
$common_helper.validate_configuration(args[0], options)
command :validate, validate_desc, [:config_file], :options => DUMP do
@common_helper.validate_configuration(args[0], options)
0
end
@ -235,11 +240,14 @@ CommandParser::CmdParser.new(ARGV) do
List all available provisions
EOT
#TODO add xml option
command :list, provision_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS do
columns = ['clusters', 'hosts', 'networks', 'datastores']
# TODO: add xml option
command :list, provision_list_desc,
:options => CommandParser::OPTIONS + CLIHelper::OPTIONS do
columns = %w[clusters hosts networks datastores]
$provision_helper.format_pool(options).show($provision_helper.get_list(columns, true), options)
@provision_helper.format_pool(options)
.show(@provision_helper.get_list(columns, true),
options)
0
end
@ -250,8 +258,9 @@ CommandParser::CmdParser.new(ARGV) do
Show provision details
EOT
command :show, provision_show_desc, [:provisionid], :options=>CommandParser::OPTIONS do
$provision_helper.show(args[0], options)
command :show, provision_show_desc,
[:provisionid], :options => CommandParser::OPTIONS do
@provision_helper.show(args[0], options)
0
end
@ -262,43 +271,27 @@ CommandParser::CmdParser.new(ARGV) do
Run configuration in all provision hosts
EOT
command :configure, provision_configure_desc, :provisionid, :options=>[MODES,FORCE] do
$common_helper.get_mode(options)
command :configure, provision_configure_desc,
:provisionid, :options => [MODES, FORCE] do
@common_helper.get_mode(options)
$provision_helper.configure(args[0], options)
@provision_helper.configure(args[0], options)
0
end
###
=begin
provision_update_desc = <<-EOT
Update the provision, adding more resources
EOT
command :update, provision_update_desc, :provisionid, :config, :options=>[CREATE_OPTIONS, INCREMENTAL] do
$common_helper.get_mode(options)
$provision_helper.update(args[0], args[1], options)
0
end
=end
###
provision_delete_desc = <<-EOT
Deletes and unprovisions all the resources
EOT
command :delete, provision_delete_desc, :provisionid, :options=>[MODES,THREADS,DELETE_ALL] do
$common_helper.get_mode(options)
command :delete, provision_delete_desc, :provisionid,
:options => [MODES, THREADS, DELETE_ALL] do
@common_helper.get_mode(options)
provision = Provision.new(args[0])
provision.refresh
provision.delete((options.has_key? :delete_all))
provision.delete((options.key? :delete_all))
0
end
@ -311,10 +304,15 @@ CommandParser::CmdParser.new(ARGV) do
List all available clusters
EOT
command [:cluster, :list], cluster_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do
columns = ['hosts', 'networks', 'datastores']
command [:cluster, :list], cluster_list_desc,
:options => CommandParser::OPTIONS + CLIHelper::OPTIONS +
OpenNebulaHelper::OPTIONS +
[OpenNebulaHelper::DESCRIBE] do
columns = %w[hosts networks datastores]
$cluster_helper.format_pool(options).show($provision_helper.get_list(columns, false), options)
@cluster_helper.format_pool(options)
.show(@provision_helper.get_list(columns, false),
options)
0
end
@ -325,14 +323,17 @@ CommandParser::CmdParser.new(ARGV) do
Deletes and unprovisions the given cluster
EOT
command [:cluster, :delete], cluster_delete_desc, [:range,:clusterid_list], :options=>[MODES,FORCE] do
$common_helper.get_mode(options)
command [:cluster, :delete], cluster_delete_desc,
[:range, :clusterid_list], :options => [MODES, FORCE] do
@common_helper.get_mode(options)
$cluster_helper.perform_actions(args[0],options,"deleted") do |cluster|
@cluster_helper.perform_actions(args[0],
options,
'deleted') do |cluster|
rc = cluster.delete
if OpenNebula.is_error?(rc)
$common_helper.fail(rc.message)
@common_helper.fail(rc.message)
end
end
@ -343,31 +344,16 @@ CommandParser::CmdParser.new(ARGV) do
# Host Commands
########################################################################
=begin
host_create_desc = <<-EOT.unindent
Create a single host
EOT
command [:host, :create], create_desc, :config, :options=>CREATE_OPTIONS do
$common_helper.get_mode(options)
$provision_helper.create(args[0], options)
0
end
=end
###
host_resume_desc = <<-EOT.unindent
Resume the host
EOT
command [:host, :resume], host_resume_desc, [:range,:hostid_list], :options=>MODES do
$common_helper.get_mode(options)
command [:host, :resume], host_resume_desc,
[:range, :hostid_list], :options => MODES do
@common_helper.get_mode(options)
$host_helper.perform_actions(args[0],options,"enabled") do |host|
$host_helper.resume_host(host)
@host_helper.perform_actions(args[0], options, 'enabled') do |host|
@host_helper.resume_host(host)
end
end
@ -377,11 +363,12 @@ CommandParser::CmdParser.new(ARGV) do
Power off the host
EOT
command [:host, :poweroff], host_poweroff_desc, [:range,:hostid_list], :options=>MODES do
$common_helper.get_mode(options)
command [:host, :poweroff], host_poweroff_desc,
[:range, :hostid_list], :options => MODES do
@common_helper.get_mode(options)
$host_helper.perform_actions(args[0],options,"disabled") do |host|
$host_helper.poweroff_host(host)
@host_helper.perform_actions(args[0], options, 'disabled') do |host|
@host_helper.poweroff_host(host)
end
end
@ -391,11 +378,12 @@ CommandParser::CmdParser.new(ARGV) do
Reboot the host
EOT
command [:host, :reboot], host_reboot_desc, [:range,:hostid_list], :options=>[MODES, HARD] do
$common_helper.get_mode(options)
command [:host, :reboot], host_reboot_desc,
[:range, :hostid_list], :options => [MODES, HARD] do
@common_helper.get_mode(options)
$host_helper.perform_actions(args[0],options,"enabled") do |host|
$host_helper.reboot_host(host, options)
@host_helper.perform_actions(args[0], options, 'enabled') do |host|
@host_helper.reboot_host(host, options)
end
end
@ -405,16 +393,17 @@ CommandParser::CmdParser.new(ARGV) do
Unprovisions and deletes the given Host
EOT
command [:host, :delete], host_delete_desc, [:range,:hostid_list], :options=>MODES do
$common_helper.get_mode(options)
command [:host, :delete], host_delete_desc,
[:range, :hostid_list], :options => MODES do
@common_helper.get_mode(options)
$host_helper.perform_actions(args[0],options,"deleted") do |host|
@host_helper.perform_actions(args[0], options, 'deleted') do |host|
host.info
begin
$host_helper.delete_host(host)
rescue => e
$common_helper.fail("Delete failed on exception: #{e.to_s}")
@host_helper.delete_host(host)
rescue StandardError => e
@common_helper.fail("Delete failed on exception: #{e}")
end
end
end
@ -425,11 +414,12 @@ CommandParser::CmdParser.new(ARGV) do
Run configuration on the host
EOT
command [:host, :configure], host_configure_desc, [:range,:hostid_list], :options=>[MODES,FORCE] do
$common_helper.get_mode(options)
command [:host, :configure], host_configure_desc,
[:range, :hostid_list], :options => [MODES, FORCE] do
@common_helper.get_mode(options)
$host_helper.perform_actions(args[0],options,"configured") do |host|
$host_helper.configure_host([host], options)
@host_helper.perform_actions(args[0], options, 'configured') do |host|
@host_helper.configure_host([host], options)
end
end
@ -439,9 +429,10 @@ CommandParser::CmdParser.new(ARGV) do
Establish SSH connection to the host
EOT
command [:host, :ssh], host_ssh_desc, :hostid, [:command, nil], :options=>CommandParser::OPTIONS do
$host_helper.perform_action(args[0],options,"") do |host|
$host_helper.ssh_host(host, args)
command [:host, :ssh], host_ssh_desc, :hostid, [:command, nil],
:options => CommandParser::OPTIONS do
@host_helper.perform_action(args[0], options, '') do |host|
@host_helper.ssh_host(host, args)
end
end
@ -451,12 +442,15 @@ CommandParser::CmdParser.new(ARGV) do
Lists bare metal Hosts in the pool
EOT
command [:host, :list], host_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do
if !options.has_key? :filter
options.merge!(filter: ["PROVIDER!=-"])
command [:host, :list], host_list_desc,
:options => CommandParser::OPTIONS + CLIHelper::OPTIONS +
OpenNebulaHelper::OPTIONS +
[OpenNebulaHelper::DESCRIBE] do
if !options.key? :filter
options[:filter] = ['PROVIDER!=-']
end
$host_helper.list_pool(options)
@host_helper.list_pool(options)
end
###
@ -465,12 +459,14 @@ CommandParser::CmdParser.new(ARGV) do
Lists bare metal Hosts continuously
EOT
command [:host, :top], host_top_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS do
if !options.has_key? :filter
options.merge!(filter: ["PROVIDER!=-"])
command [:host, :top], host_top_desc,
:options => CommandParser::OPTIONS +
CLIHelper::OPTIONS + OpenNebulaHelper::OPTIONS do
if !options.key? :filter
options[:filter] = ['PROVIDER!=-']
end
$host_helper.list_pool(options, true)
@host_helper.list_pool(options, true)
end
########################################################################
@ -481,8 +477,15 @@ CommandParser::CmdParser.new(ARGV) do
List all available datastores
EOT
command [:datastore, :list], datastore_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do
$datastore_helper.format_pool(options).show($provision_helper.get_provision_resources(['datastores']), options)
command [:datastore, :list], datastore_list_desc,
:options => CommandParser::OPTIONS +
CLIHelper::OPTIONS +
OpenNebulaHelper::OPTIONS +
[OpenNebulaHelper::DESCRIBE] do
@datastore_helper.format_pool(options)
.show(@provision_helper
.get_provision_resources(['datastores']),
options)
0
end
@ -493,11 +496,13 @@ CommandParser::CmdParser.new(ARGV) do
Deletes and unprovisions the given datastore
EOT
command [:datastore, :delete], datastore_delete_desc, [:range,:datastoreid_list], :options=>[MODES,FORCE] do
$common_helper.get_mode(options)
command [:datastore, :delete], datastore_delete_desc,
[:range, :datastoreid_list], :options => [MODES, FORCE] do
@common_helper.get_mode(options)
$datastore_helper.perform_actions(args[0],options,"deleted") do |datastore|
$logger.info("Deleting datastore #{datastore['ID']}")
@datastore_helper
.perform_actions(args[0], options, 'deleted') do |datastore|
@logger.info("Deleting datastore #{datastore['ID']}")
datastore.delete
end
@ -513,8 +518,15 @@ CommandParser::CmdParser.new(ARGV) do
List all available virtual networks
EOT
command [:vnet, :list], vnet_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do
$vnet_helper.format_pool(options).show($provision_helper.get_provision_resources(['networks']), options)
command [:vnet, :list], vnet_list_desc,
:options => CommandParser::OPTIONS +
CLIHelper::OPTIONS +
OpenNebulaHelper::OPTIONS +
[OpenNebulaHelper::DESCRIBE] do
@vnet_helper.format_pool(options)
.show(@provision_helper
.get_provision_resources(['networks']),
options)
0
end
@ -525,11 +537,12 @@ CommandParser::CmdParser.new(ARGV) do
Deletes and unprovisions the given virtual network
EOT
command [:vnet, :delete], vnet_delete_desc, [:range,:vnetid_list], :options=>[MODES,FORCE] do
$common_helper.get_mode(options)
command [:vnet, :delete], vnet_delete_desc,
[:range, :vnetid_list], :options => [MODES, FORCE] do
@common_helper.get_mode(options)
$vnet_helper.perform_actions(args[0],options,"deleted") do |vnet|
$logger.info("Deleting vnet #{vnet['ID']}")
@vnet_helper.perform_actions(args[0], options, 'deleted') do |vnet|
@logger.info("Deleting vnet #{vnet['ID']}")
vnet.delete
end

View File

@ -557,8 +557,10 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :migrate, migrate_desc, [:range, :vmid_list], :hostid,
[:datastoreid, nil], :options => [ENFORCE, OneVMHelper::LIVE,
OneVMHelper::POFF, OneVMHelper::POFFHARD] do
[:datastoreid, nil], :options => [ENFORCE,
OneVMHelper::LIVE,
OneVMHelper::POFF,
OneVMHelper::POFFHARD] do
host_id = args[1]
verbose = "migrating to #{host_id}"
@ -795,7 +797,8 @@ CommandParser::CmdParser.new(ARGV) do
if ip
if !nic_alias && !nic_name
template = "NIC = [ NETWORK_ID = #{network_id}, IP = #{ip} ]"
template = "NIC = [ NETWORK_ID = #{network_id},\
IP = #{ip} ]"
elsif !nic_alias && nic_name
template = "NIC = [ NETWORK_ID = #{network_id},
IP = #{ip},
@ -1125,8 +1128,8 @@ CommandParser::CmdParser.new(ARGV) do
state, valid states are: pending, failure, poweroff, undeploy, hold or
cloning.
This command accepts a template file or opens an editor, the full list of
configuration attributes are:
This command accepts a template file or opens an editor, the full list
of configuration attributes are:
OS = ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT"]
FEATURES = ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"]
@ -1154,7 +1157,8 @@ CommandParser::CmdParser.new(ARGV) do
template = vm.template_like_str('TEMPLATE', true,
'OS | FEATURES | INPUT | '\
'GRAPHICS | RAW | CONTEXT | CPU_MODEL')
'GRAPHICS | RAW | CONTEXT | '\
'CPU_MODEL')
template = OpenNebulaHelper.editor_input(template)
end
@ -1163,8 +1167,8 @@ CommandParser::CmdParser.new(ARGV) do
end
lock_desc = <<-EOT.unindent
Locks a VM with differents levels for lock any actions with this VM, show and
monitoring never will be locked.
Locks a VM with different levels to restrict the actions that can be
performed on it; show and monitoring operations are never locked.
Valid states are: All.
EOT

View File

@ -314,6 +314,8 @@ void DispatchManager::resubmit_action(int vid)
vm->set_state(VirtualMachine::PENDING);
vm->set_deploy_id(""); //reset the deploy-id
vmpool->update(vm);
vm->unlock();

View File

@ -88,13 +88,12 @@ class CollectdClient
# Collect the Data
ts = Time.now
data = run_probes
# Send signal to itself to run probes and send the data
Process.kill('HUP', $$)
run_probes_time = (Time.now - ts).to_i
# Send the Data
send data
# Sleep during the Cycle
sleep_time = @monitor_push_period - run_probes_time
sleep_time = 0 if sleep_time < 0
@ -130,4 +129,16 @@ sleep rand monitor_push_period
# Start push monitorization
client = CollectdClient.new(hypervisor, number, host, port, probes_args,
monitor_push_period)
Signal.trap('HUP') do
# ignore another HUP until we handle this one
this_handler = Signal.trap('HUP', 'IGNORE')
data = client.run_probes
client.send data
# set the handler back
Signal.trap('HUP', this_handler)
end
client.monitor

View File

@ -30,14 +30,9 @@ begin
machines = []
models = []
Open3.popen3("virsh -r -c qemu:///system capabilities") {|i, o, e, t|
if t.value.exitstatus != 0
exit -1
end
capabilities = o.read
}
cmd = 'virsh -r -c qemu:///system capabilities'
capabilities, e, s = Open3.capture3(cmd)
exit(-1) unless s.success?
cap_xml = REXML::Document.new(capabilities)
cap_xml = cap_xml.root
@ -94,12 +89,9 @@ begin
end
}
cpu_models = ""
Open3.popen3("virsh -r -c qemu:///system cpu-models #{a}") {|i, o, e, t|
break if t.value.exitstatus != 0
cpu_models = o.read
}
cmd = "virsh -r -c qemu:///system cpu-models #{a}"
cpu_models, e, s = Open3.capture3(cmd)
break unless s.success?
cpu_models.each_line { |l|
l.chomp!

View File

@ -994,3 +994,9 @@ function get_nic_information {
OUTBOUND_PEAK_KB="${XPATH_ELEMENTS[j++]}"
ORDER="${XPATH_ELEMENTS[j++]}"
}
function hup_collectd
{
SEND_HUP='kill -HUP `cat /tmp/one-collectd-client.pid` || true'
ssh_exec_and_log_no_error $1 "$SEND_HUP"
}

View File

@ -31,7 +31,7 @@ class LinuxContainersMarket
#---------------------------------------------------------------------------
DEFAULTS = {
:url => 'https://images.linuxcontainers.org',
:sizemb => 5120,
:sizemb => 2560,
:fs => 'ext4',
:format => 'raw',
:agent => 'OpenNebula'

View File

@ -67,4 +67,4 @@ EOF
)
ssh_exec_and_log $DST_HOST "$delete_file" "Error deleting $DST_PATH"
hup_collectd $DST_HOST

View File

@ -70,5 +70,6 @@ EOF
log "Making filesystem of ${SIZE}M and type $FSTYPE at $DST"
ssh_exec_and_log $DST_HOST "$MKSCRIPT" "Could not create image $DST_PATH"
hup_collectd $DST_HOST
exit 0

View File

@ -43,3 +43,4 @@ fi
ssh_exec_and_log "${SRC_HOST}" "qemu-img resize ${SRC_PATH} ${SIZE}M" \
"Error resizing image ${SRC_PATH}"
hup_collectd $SRC_HOST

View File

@ -142,4 +142,5 @@ EOF
ssh_exec_and_log "$DST_HOST" "$CLONE_CMD" \
"Error cloning $SRC_PATH to $LV_NAME"
hup_collectd $DST_HOST
exit 0

View File

@ -66,5 +66,6 @@ log "Dumping $SRC to $DST"
ssh_exec_and_log "$SRC_HOST" "$DUMP_CMD" \
"Error dumping $SRC to $DST"
hup_collectd $SRC_HOST
exit 0

View File

@ -84,3 +84,5 @@ else
ssh_exec_and_log "$DST_HOST" "$DELETE_CMD" \
"Error deleting $DST_PATH"
fi
hup_collectd $DST_HOST

View File

@ -154,3 +154,6 @@ fi
ssh_exec_and_log "$DST_HOST" "mv $SRC_PATH $DST_PATH" \
"Error moving VM files to another System DS: $SRC_PATH to $DST_PATH in $DST_HOST"
hup_collectd $DST_HOST
hup_collectd $SRC_HOST

View File

@ -75,5 +75,6 @@ ssh_exec_and_log "$SRC_HOST" "$DUMP_CMD" \
LOCK="tm-fs_lvm-${DS_SYS_ID}.lock"
exclusive "${LOCK}" 120 ssh_exec_and_log "$SRC_HOST" "$DELETE_CMD" \
"Error dumping $SRC to $DST"
hup_collectd $SRC_HOST
exit 0

View File

@ -93,3 +93,4 @@ if [ -n "$ORIGINAL_SIZE" -a "$SIZE" -gt "$ORIGINAL_SIZE" ]; then
"Error resizing image $DST"
fi
hup_collectd $DST_HOST

View File

@ -120,5 +120,6 @@ fi
log "Moving $SRC to datastore as $DST"
exec_and_log "$SCP -r $SRC $DST" "Error copying $SRC to $DST"
hup_collectd $SRC_HOST
exit 0

View File

@ -85,5 +85,7 @@ EOF
)
ssh_exec_and_log "$SRC_HOST" "$TAR_SSH" "Error copying disk directory to target host"
hup_collectd $DST_HOST
hup_collectd $SRC_HOST
exit 0

View File

@ -53,6 +53,7 @@ exec_and_log "$SCP -r $SRC $DST" "Error copying $SRC to $DST"
if $SSH $SRC_HOST ls ${SRC_PATH_SNAP} >/dev/null 2>&1; then
exec_and_log "rsync -r --delete ${SRC_HOST}:${SRC_PATH_SNAP}/ ${DST_SNAP}"
hup_collectd $SRC_HOST
fi
exit 0

View File

@ -82,4 +82,5 @@ EOT
ssh_exec_and_log "${SRC_HOST}" "${CMD}" \
"Error creating snapshot ${SNAP_PATH}"
hup_collectd $SRC_HOST

View File

@ -70,4 +70,5 @@ CURRENT_PATH=${DISK_PATH}
ssh_exec_and_log "${SRC_HOST}" "rm ${SNAP_PATH}" \
"Error deleting snapshot ${SNAP_PATH}"
hup_collectd $SRC_HOST

View File

@ -77,4 +77,5 @@ EOF
ssh_exec_and_log "${SRC_HOST}" "${CMD}" \
"Error reverting snapshot to ${SNAP_PATH}"
hup_collectd $SRC_HOST

View File

@ -2420,6 +2420,19 @@ void VirtualMachineManager::detach_nic_action(
return;
}
int uid = vm->get_created_by_uid();
int owner_id = vm->get_uid();
vm->unlock();
password = Nebula::instance().get_upool()->get_token_password(uid, owner_id);
vm = vmpool->get(vid);
if (vm == 0)
{
return;
}
if (!vm->hasHistory())
{
goto error_history;

View File

@ -169,9 +169,8 @@ class Container
err = 'cannot create user data directory:'
rc, o, e = Command.execute("sudo #{cmd}", true) if e.include?(err)
return [rc, o, e] unless rc != 0
OpenNebula.log_error("#{__method__}: Failed to run command #{cmd}: #{e}")
log = "Failed to run command #{cmd}: #{e}"
OpenNebula.log_error("#{__method__}: #{log}") unless rc.zero?
[rc, o, e]
end

View File

@ -20,7 +20,7 @@ $LOAD_PATH.unshift File.dirname(__FILE__)
require 'mapper'
class Qcow2Mapper < Mapper
class Qcow2Mapper < Mapper
# Max number of block devices. This should be set to the parameter used
# to load the nbd kernel module (default in kernel is 16)
@ -32,18 +32,20 @@ class Qcow2Mapper < Mapper
return if device.empty?
dsrc = one_vm.disk_source(disk)
cmd = "#{COMMANDS[:nbd]} -c #{device} #{dsrc}"
File.chmod(0o664, dsrc) if File.symlink?(one_vm.sysds_path)
File.chmod(0664, dsrc) if File.symlink?(one_vm.sysds_path)
map = "#{COMMANDS[:nbd]} -c #{device} #{dsrc}"
rc, _out, err = Command.execute(map, false)
rc, _out, err = Command.execute(cmd, true)
if rc != 0
unless rc.zero?
OpenNebula.log_error("#{__method__}: #{err}")
return
end
sleep 0.5 # TODO: improve settledown, lsblk -f fails
sleep 5 # wait for parts to come out
partitions = lsblk(device)
show_parts(device) unless partitions[0]['type'] == 'part'
device
end
@ -51,7 +53,7 @@ class Qcow2Mapper < Mapper
def do_unmap(device, _one_vm, _disk, _directory)
cmd = "#{COMMANDS[:nbd]} -d #{device}"
rc, _out, err = Command.execute(cmd, true)
rc, _out, err = Command.execute(cmd, false)
return true if rc.zero?
@ -61,21 +63,32 @@ class Qcow2Mapper < Mapper
private
def nbd_device()
def show_parts(device)
get_parts = "#{COMMANDS[:kpartx]} -s -av #{device}"
rc, _out, err = Command.execute(get_parts, false)
unless rc.zero?
OpenNebula.log_error("#{__method__}: #{err}")
return
end
end
def nbd_device
sys_parts = lsblk('')
device_id = -1
nbds = []
sys_parts.each { |p|
sys_parts.each do |p|
m = p['name'].match(/nbd(\d+)/)
next if !m
next unless m
nbds << m[1].to_i
}
end
NBDS_MAX.times { |i|
NBDS_MAX.times do |i|
return "/dev/nbd#{i}" unless nbds.include?(i)
}
end
OpenNebula.log_error("#{__method__}: Cannot find free nbd device")

View File

@ -44,7 +44,7 @@ class FSRawMapper < Mapper
return true if rc.zero?
OpenNebula.log_error("#{__method__}: #{err}") if rc != 0
OpenNebula.log_error("#{__method__}: #{err}")
nil
end

View File

@ -129,19 +129,17 @@ module LXD
state
end
def lxc_path(vm_name)
path = 'lxc/' + vm_name
path = "#{ENV['LXC_CGROUP_PREFIX']}#{path}" if ENV['LXC_CGROUP_PREFIX']
end
end
def get_memory(vm_name)
begin
stat = File.read('/sys/fs/cgroup/memory/' + lxc_path(vm_name) + '/memory.usage_in_bytes').to_i
stat / 1024
rescue StandardError
return 0
end
stat = File.read('/sys/fs/cgroup/memory/' + lxc_path(vm_name) + '/memory.usage_in_bytes').to_i
stat / 1024
rescue StandardError
0
end
def get_net_statistics(vmd)
@ -176,7 +174,7 @@ module LXD
cpu_jiffies = get_cpu_jiffies - start_cpu_jiffies
vm_names.each do |vm_name|
cpu_used[vm_name] = (get_process_jiffies(vm_name).to_f -
cpu_used[vm_name] = (get_process_jiffies(vm_name).to_f -
cpu_used[vm_name]) / cpu_jiffies
cpu_used[vm_name] = (cpu_used[vm_name] * multiplier).round(2)
@ -205,7 +203,7 @@ module LXD
def get_process_jiffies(vm_name)
begin
jiffies = 0
stat = File.read('/sys/fs/cgroup/cpu,cpuacct/' + lxc_path(vm_name) + '/cpuacct.stat')
stat.lines.each {|line| jiffies += line.split(' ')[1] }
rescue StandardError
@ -232,20 +230,19 @@ module LXD
arch = container.architecture
capacity = container.expanded_config
cpu = ""
vcpu= ""
mem = ""
cpu = ''
vcpu = ''
mem = ''
if capacity
cpu = capacity['limits.cpu.allowance']
vcpu = capacity['limits.cpu']
mem = capacity['limits.memory']
mem = capacity['limits.memory']
end
cpu = "50%" if !cpu || cpu.empty?
vcpu = "1" if !vcpu || vcpu.empty?
mem = "512M" if !mem || mem.empty?
cpu = '50%' if !cpu || cpu.empty?
vcpu = '1' if !vcpu || vcpu.empty?
mem = '512MB' if !mem || mem.empty?
cpu = cpu.chomp('%').to_f / 100
mem = parse_memory(mem)

View File

@ -110,7 +110,12 @@ module VNMMAD
end
if deploy_id && vm.vm_info[:dumpxml].nil?
vm.vm_info[:dumpxml] = YAML.safe_load(`lxc config show #{deploy_id} 2>/dev/null`)
cmd = "lxc config show #{deploy_id} 2>/dev/null"
config = YAML.safe_load(`#{cmd}`)
config = YAML.safe_load(`sudo #{cmd}`) if config.nil?
vm.vm_info[:dumpxml] = config
vm.vm_info.each_key do |k|
vm.vm_info[k] = nil if vm.vm_info[k].to_s.strip.empty?