mirror of https://github.com/OpenNebula/one.git synced 2025-02-22 17:57:46 +03:00

F #5228: LXC driver set

co-authored-by: Christian González <cgonzalez@opennebula.io>
co-authored-by: Daniel Clavijo Coca <dclavijo@opennebula.systems>
Ruben S. Montero committed 2021-03-04 21:07:50 +01:00
parent 17e9164472
commit 6cadde8eef
GPG Key ID: A0CEA6FA880A1D87 (no known key found for this signature in database)
65 changed files with 2345 additions and 341 deletions

View File

@ -322,10 +322,12 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/etc/im/kvm-probes.d \
$VAR_LOCATION/remotes/etc/im/qemu-probes.d \
$VAR_LOCATION/remotes/etc/im/lxd-probes.d \
$VAR_LOCATION/remotes/etc/im/lxc-probes.d \
$VAR_LOCATION/remotes/etc/im/firecracker-probes.d \
$VAR_LOCATION/remotes/etc/market/http \
$VAR_LOCATION/remotes/etc/vmm/kvm \
$VAR_LOCATION/remotes/etc/vmm/lxd \
$VAR_LOCATION/remotes/etc/vmm/lxc \
$VAR_LOCATION/remotes/etc/vmm/firecracker \
$VAR_LOCATION/remotes/etc/vmm/vcenter \
$VAR_LOCATION/remotes/etc/vnm \
@ -359,6 +361,13 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/im/lxd-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/lxd-probes.d/vm/status \
$VAR_LOCATION/remotes/im/lxd-probes.d/vm/snapshot \
$VAR_LOCATION/remotes/im/lxc.d \
$VAR_LOCATION/remotes/im/lxc-probes.d/host/beacon \
$VAR_LOCATION/remotes/im/lxc-probes.d/host/monitor \
$VAR_LOCATION/remotes/im/lxc-probes.d/host/system \
$VAR_LOCATION/remotes/im/lxc-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/lxc-probes.d/vm/status \
$VAR_LOCATION/remotes/im/lxc-probes.d/vm/snapshot \
$VAR_LOCATION/remotes/im/firecracker.d \
$VAR_LOCATION/remotes/im/firecracker-probes.d/host/beacon \
$VAR_LOCATION/remotes/im/firecracker-probes.d/host/monitor \
@ -403,6 +412,7 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/vmm/az \
$VAR_LOCATION/remotes/vmm/one \
$VAR_LOCATION/remotes/vmm/lxd \
$VAR_LOCATION/remotes/vmm/lxc \
$VAR_LOCATION/remotes/vmm/packet \
$VAR_LOCATION/remotes/vmm/firecracker \
$VAR_LOCATION/remotes/vnm \
@ -570,6 +580,7 @@ INSTALL_FILES=(
IM_PROBES_FIRECRACKER_FILES:$VAR_LOCATION/remotes/im/firecracker.d
IM_PROBES_DUMMY_FILES:$VAR_LOCATION/remotes/im/dummy.d
IM_PROBES_LXD_FILES:$VAR_LOCATION/remotes/im/lxd.d
IM_PROBES_LXC_FILES:$VAR_LOCATION/remotes/im/lxc.d
IM_PROBES_VCENTER_FILES:$VAR_LOCATION/remotes/im/vcenter.d
IM_PROBES_EC2_FILES:$VAR_LOCATION/remotes/im/ec2.d
IM_PROBES_AZ_FILES:$VAR_LOCATION/remotes/im/az.d
@ -601,6 +612,13 @@ INSTALL_FILES=(
IM_PROBES_LXD_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d/vm/status
IM_PROBES_LXD_PROBES_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d
IM_PROBES_ETC_LXD_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/lxd-probes.d
IM_PROBES_LXC_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/host/beacon
IM_PROBES_LXC_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/host/monitor
IM_PROBES_LXC_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/host/system
IM_PROBES_LXC_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/vm/monitor
IM_PROBES_LXC_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/vm/status
IM_PROBES_LXC_PROBES_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d
IM_PROBES_ETC_LXC_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/lxc-probes.d
IM_PROBES_AZ_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/az-probes.d/host/beacon
IM_PROBES_AZ_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/az-probes.d/host/monitor
IM_PROBES_AZ_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/az-probes.d/host/system
@ -641,10 +659,13 @@ INSTALL_FILES=(
VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm
VMM_EXEC_LXD_SCRIPTS:$VAR_LOCATION/remotes/vmm/lxd
VMM_EXEC_LXD_LIB:$VAR_LOCATION/remotes/vmm/lxd
VMM_EXEC_LXC_SCRIPTS:$VAR_LOCATION/remotes/vmm/lxc
VMM_EXEC_LXC_LIB:$VAR_LOCATION/remotes/vmm/lxc
VMM_EXEC_FIRECRACKER_SCRIPTS:$VAR_LOCATION/remotes/vmm/firecracker
VMM_EXEC_FIRECRACKER_LIB:$VAR_LOCATION/remotes/vmm/firecracker
VMM_EXEC_ETC_KVM_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/kvm
VMM_EXEC_ETC_LXD_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/lxd
VMM_EXEC_ETC_LXC_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/lxc
VMM_EXEC_ETC_FIRECRACKER_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/firecracker
VMM_EXEC_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/vmm/vcenter
VMM_EXEC_ETC_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/vcenter
@ -1016,7 +1037,9 @@ MADS_LIB_FILES="src/mad/sh/madcommon.sh \
#-------------------------------------------------------------------------------
# Common library files for VMM drivers
#-------------------------------------------------------------------------------
VMM_EXEC_LIB="src/vmm_mad/remotes/lib/command.rb"
VMM_EXEC_LIB="src/vmm_mad/remotes/lib/command.rb \
src/vmm_mad/remotes/lib/xmlparser.rb \
src/vmm_mad/remotes/lib/opennebula_vm.rb"
#-------------------------------------------------------------------------------
# VMM Lib vcenter files, used by the vCenter Driver to be installed in
@ -1102,6 +1125,41 @@ VMM_EXEC_LXD_LIB="src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb \
src/vmm_mad/remotes/lib/lxd/command.rb \
src/vmm_mad/remotes/lib/lxd/container.rb"
#-------------------------------------------------------------------------------
# VMM SH Driver LXC scripts, to be installed under $REMOTES_LOCATION/vmm/lxc
#-------------------------------------------------------------------------------
VMM_EXEC_LXC_SCRIPTS="
src/vmm_mad/remotes/lxc/attach_disk \
src/vmm_mad/remotes/lxc/deploy \
src/vmm_mad/remotes/lxc/prereconfigure \
src/vmm_mad/remotes/lxc/reset \
src/vmm_mad/remotes/lxc/restore \
src/vmm_mad/remotes/lxc/snapshot_create \
src/vmm_mad/remotes/lxc/attach_nic \
src/vmm_mad/remotes/lxc/detach_disk \
src/vmm_mad/remotes/lxc/migrate \
src/vmm_mad/remotes/lxc/reboot \
src/vmm_mad/remotes/lxc/resize \
src/vmm_mad/remotes/lxc/save \
src/vmm_mad/remotes/lxc/snapshot_delete \
src/vmm_mad/remotes/lxc/cancel \
src/vmm_mad/remotes/lxc/detach_nic \
src/vmm_mad/remotes/lxc/migrate_local \
src/vmm_mad/remotes/lxc/reconfigure \
src/vmm_mad/remotes/lxc/resize_disk \
src/vmm_mad/remotes/lxc/shutdown \
src/vmm_mad/remotes/lxc/snapshot_revert"
VMM_EXEC_LXC_LIB="src/vmm_mad/remotes/lib/lxc/opennebula_vm.rb \
src/vmm_mad/remotes/lib/lxc/client.rb \
src/vmm_mad/remotes/lib/lxc/command.rb \
src/vmm_mad/remotes/lib/lxc/container.rb \
src/vmm_mad/remotes/lib/lxc/storage/mappers/qcow2.rb \
src/vmm_mad/remotes/lib/lxc/storage/mappers/raw.rb \
src/vmm_mad/remotes/lib/lxc/storage/mappers/rbd.rb \
src/vmm_mad/remotes/lib/lxc/storage/storageutils.rb"
#-------------------------------------------------------------------------------
# VMM SH Driver Firecracker scripts, to be installed under $REMOTES_LOCATION/vmm/firecracker
#-------------------------------------------------------------------------------
@ -1120,6 +1178,12 @@ VMM_EXEC_FIRECRACKER_LIB="src/vmm_mad/remotes/lib/firecracker/opennebula_vm.rb \
VMM_EXEC_ETC_LXD_SCRIPTS="src/vmm_mad/remotes/lxd/lxdrc"
#-------------------------------------------------------------------------------
# VMM configuration LXD scripts, to be installed under $REMOTES_LOCATION/etc/vmm/lxd
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_LXC_SCRIPTS="src/vmm_mad/remotes/lxc/lxcrc"
#-------------------------------------------------------------------------------
# VMM configuration Firecracker scripts, to be installed under $REMOTES_LOCATION/etc/vmm/firecracker
#-------------------------------------------------------------------------------
@ -1280,6 +1344,7 @@ IM_PROBES_FILES="\
IM_PROBES_LIB_FILES="\
src/im_mad/remotes/lib/kvm.rb \
src/im_mad/remotes/lib/lxd.rb \
src/im_mad/remotes/lib/lxc.rb \
src/im_mad/remotes/lib/linux.rb \
src/im_mad/remotes/lib/firecracker.rb\
src/im_mad/remotes/lib/numa_common.rb \
@ -1424,6 +1489,38 @@ IM_PROBES_ETC_LXD_PROBES_FILES="\
src/im_mad/remotes/lxd-probes.d/pci.conf \
src/im_mad/remotes/lib/probe_db.conf"
# LXC PROBES
IM_PROBES_LXC_FILES="\
src/im_mad/remotes/lxc.d/monitord-client_control.sh \
src/im_mad/remotes/lxc.d/monitord-client.rb"
IM_PROBES_LXC_HOST_BEACON_FILES="\
src/im_mad/remotes/lxc-probes.d/host/beacon/monitord-client-shepherd.sh \
src/im_mad/remotes/lxc-probes.d/host/beacon/date.sh"
IM_PROBES_LXC_HOST_MONITOR_FILES="\
src/im_mad/remotes/lxc-probes.d/host/monitor/linux_usage.rb \
src/im_mad/remotes/lxc-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_LXC_HOST_SYSTEM_FILES="\
src/im_mad/remotes/lxc-probes.d/host/system/architecture.sh \
src/im_mad/remotes/lxc-probes.d/host/system/cpu.sh \
src/im_mad/remotes/lxc-probes.d/host/system/linux_host.rb \
src/im_mad/remotes/lxc-probes.d/host/system/monitor_ds.rb \
src/im_mad/remotes/lxc-probes.d/host/system/name.sh \
src/im_mad/remotes/lxc-probes.d/host/system/numa_host.rb \
src/im_mad/remotes/lxc-probes.d/host/system/version.sh"
IM_PROBES_LXC_VM_MONITOR_FILES="\
src/im_mad/remotes/lxc-probes.d/vm/monitor/poll.rb \
src/im_mad/remotes/lxc-probes.d/vm/monitor/monitor_ds_vm.rb"
IM_PROBES_LXC_VM_STATUS_FILES="\
src/im_mad/remotes/lxc-probes.d/vm/status/state.rb"
IM_PROBES_ETC_LXC_PROBES_FILES="\
src/im_mad/remotes/lib/probe_db.conf"
# Firecracker PROBES
IM_PROBES_FIRECRACKER_FILES="\
src/im_mad/remotes/firecracker.d/monitord-client_control.sh \

View File

@ -0,0 +1,2 @@
options loop max_loop=256
options nbd nbds_max=256

View File

@ -0,0 +1,2 @@
loop
nbd

View File

@ -497,6 +497,34 @@ VM_MAD = [
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# LXC Virtualization Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
# -l <actions[=command_name]> actions executed locally, command can be
# overridden for each action.
# Valid actions: deploy, shutdown, cancel, save, restore, migrate, poll
# An example: "-l migrate=migrate_local,save"
# -d <actions> comma separated list of actions which forward SSH agent
# from frontend to remote host (default migrate)
# -p more than one action per host in parallel, needs support from hypervisor
# -s <shell> to execute remote commands, bash by default
# -w Timeout in seconds to execute external commands (default unlimited)
#
#-------------------------------------------------------------------------------
VM_MAD = [
NAME = "lxc",
SUNSTONE_NAME = "LXC",
EXECUTABLE = "one_vmm_exec",
ARGUMENTS = "-t 15 -r 0 lxc",
# DEFAULT = "vmm_exec/vmm_exec_lxc.conf",
TYPE = "xml",
KEEP_SNAPSHOTS = "no",
IMPORTED_VMS_ACTIONS = "terminate, terminate-hard, reboot, reboot-hard, poweroff, poweroff-hard, suspend, resume, stop, delete, nic-attach, nic-detach"
]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Firecracker Virtualization Driver Manager Configuration
# -r number of retries when monitoring a host

View File

@ -2,13 +2,14 @@ Defaults:oneadmin !requiretty
Defaults:oneadmin secure_path = /sbin:/bin:/usr/sbin:/usr/bin
Cmnd_Alias ONE_CEPH = /usr/bin/rbd
Cmnd_Alias ONE_FIRECRACKER = /usr/bin/jailer, /usr/bin/mount, /usr/sbin/one-clean-firecracker-domain, /usr/sbin/one-prepare-firecracker-domain
Cmnd_Alias ONE_FIRECRACKER = /usr/bin/jailer, /usr/sbin/one-clean-firecracker-domain, /usr/sbin/one-prepare-firecracker-domain
Cmnd_Alias ONE_HA = /usr/bin/systemctl start opennebula-flow, /usr/bin/systemctl stop opennebula-flow, /usr/bin/systemctl start opennebula-gate, /usr/bin/systemctl stop opennebula-gate, /usr/bin/systemctl start opennebula-hem, /usr/bin/systemctl stop opennebula-hem, /usr/bin/systemctl start opennebula-showback.timer, /usr/bin/systemctl stop opennebula-showback.timer, /usr/sbin/service opennebula-flow start, /usr/sbin/service opennebula-flow stop, /usr/sbin/service opennebula-gate start, /usr/sbin/service opennebula-gate stop, /usr/sbin/service opennebula-hem start, /usr/sbin/service opennebula-hem stop, /usr/sbin/arping, /usr/sbin/ip address *
Cmnd_Alias ONE_LVM = /usr/sbin/lvcreate, /usr/sbin/lvremove, /usr/sbin/lvs, /usr/sbin/vgdisplay, /usr/sbin/lvchange, /usr/sbin/lvscan, /usr/sbin/lvextend
Cmnd_Alias ONE_LXC = /usr/bin/mount, /usr/bin/umount, /usr/bin/bindfs, /usr/sbin/losetup, /usr/bin/qemu-nbd, /usr/bin/lxc-attach, /usr/bin/lxc-config, /usr/bin/lxc-create, /usr/bin/lxc-destroy, /usr/bin/lxc-info, /usr/bin/lxc-ls, /usr/bin/lxc-start, /usr/bin/lxc-stop, /usr/bin/lxc-console, /usr/sbin/e2fsck, /usr/sbin/resize2fs, /usr/sbin/xfs_growfs, /usr/bin/rbd-nbd
Cmnd_Alias ONE_MARKET = /usr/lib/one/sh/create_container_image.sh, /usr/lib/one/sh/create_docker_image.sh
Cmnd_Alias ONE_NET = /usr/sbin/ebtables, /usr/sbin/iptables, /usr/sbin/ip6tables, /usr/sbin/ipset, /usr/sbin/ip link *, /usr/sbin/ip tuntap *, /usr/sbin/ip route *, /usr/sbin/ip neighbour *
Cmnd_Alias ONE_OVS = /usr/bin/ovs-ofctl, /usr/bin/ovs-vsctl
## Command aliases are enabled individually in dedicated
## sudoers files by each OpenNebula component (server, node).
# oneadmin ALL=(ALL) NOPASSWD: ONE_CEPH, ONE_FIRECRACKER, ONE_HA, ONE_LVM, ONE_MARKET, ONE_NET, ONE_OVS
# oneadmin ALL=(ALL) NOPASSWD: ONE_CEPH, ONE_FIRECRACKER, ONE_HA, ONE_LVM, ONE_LXC, ONE_MARKET, ONE_NET, ONE_OVS

View File

@ -2,9 +2,10 @@ Defaults:oneadmin !requiretty
Defaults:oneadmin secure_path = /sbin:/bin:/usr/sbin:/usr/bin
Cmnd_Alias ONE_CEPH = /usr/bin/rbd
Cmnd_Alias ONE_FIRECRACKER = /usr/bin/jailer, /bin/mount, /usr/sbin/one-clean-firecracker-domain, /usr/sbin/one-prepare-firecracker-domain
Cmnd_Alias ONE_FIRECRACKER = /usr/bin/jailer, /usr/sbin/one-clean-firecracker-domain, /usr/sbin/one-prepare-firecracker-domain
Cmnd_Alias ONE_HA = /bin/systemctl start opennebula-flow, /bin/systemctl stop opennebula-flow, /bin/systemctl start opennebula-gate, /bin/systemctl stop opennebula-gate, /bin/systemctl start opennebula-hem, /bin/systemctl stop opennebula-hem, /bin/systemctl start opennebula-showback.timer, /bin/systemctl stop opennebula-showback.timer, /usr/sbin/service opennebula-flow start, /usr/sbin/service opennebula-flow stop, /usr/sbin/service opennebula-gate start, /usr/sbin/service opennebula-gate stop, /usr/sbin/service opennebula-hem start, /usr/sbin/service opennebula-hem stop, /usr/bin/arping, /sbin/ip address *
Cmnd_Alias ONE_LVM = /sbin/lvcreate, /sbin/lvremove, /sbin/lvs, /sbin/vgdisplay, /sbin/lvchange, /sbin/lvscan, /sbin/lvextend
Cmnd_Alias ONE_LXC = /bin/mount, /bin/umount, /usr/bin/bindfs, /sbin/losetup, /usr/bin/qemu-nbd, /usr/bin/lxc-attach, /usr/bin/lxc-config, /usr/bin/lxc-create, /usr/bin/lxc-destroy, /usr/bin/lxc-info, /usr/bin/lxc-ls, /usr/bin/lxc-start, /usr/bin/lxc-stop, /usr/bin/lxc-console, /sbin/e2fsck, /sbin/resize2fs, /usr/sbin/xfs_growfs, /usr/bin/rbd-nbd
Cmnd_Alias ONE_LXD = /snap/bin/lxc, /usr/bin/catfstab, /bin/mount, /bin/umount, /bin/mkdir, /bin/lsblk, /sbin/losetup, /sbin/kpartx, /usr/bin/qemu-nbd, /sbin/blkid, /sbin/e2fsck, /sbin/resize2fs, /usr/sbin/xfs_growfs, /usr/bin/rbd-nbd, /usr/sbin/xfs_admin, /sbin/tune2fs
Cmnd_Alias ONE_MARKET = /usr/lib/one/sh/create_container_image.sh, /usr/lib/one/sh/create_docker_image.sh
Cmnd_Alias ONE_NET = /sbin/ebtables, /sbin/iptables, /sbin/ip6tables, /sbin/ipset, /sbin/ip link *, /sbin/ip tuntap *, /sbin/ip route *, /sbin/ip neighbour *
@ -12,4 +13,4 @@ Cmnd_Alias ONE_OVS = /usr/bin/ovs-ofctl, /usr/bin/ovs-vsctl
## Command aliases are enabled individually in dedicated
## sudoers files by each OpenNebula component (server, node).
# oneadmin ALL=(ALL) NOPASSWD: ONE_CEPH, ONE_FIRECRACKER, ONE_HA, ONE_LVM, ONE_LXD, ONE_MARKET, ONE_NET, ONE_OVS
# oneadmin ALL=(ALL) NOPASSWD: ONE_CEPH, ONE_FIRECRACKER, ONE_HA, ONE_LVM, ONE_LXC, ONE_LXD, ONE_MARKET, ONE_NET, ONE_OVS

View File

@ -0,0 +1 @@
oneadmin ALL=(ALL:ALL) NOPASSWD: ONE_LXC, ONE_NET, ONE_OVS, ONE_CEPH

View File

@ -66,7 +66,12 @@ class Sudoers
#{lib_location}/sh/create_docker_image.sh ],
:FIRECRACKER => %w[/usr/bin/jailer
/usr/sbin/one-clean-firecracker-domain
/usr/sbin/one-prepare-firecracker-domain]
/usr/sbin/one-prepare-firecracker-domain],
:LXC => %w[
mount umount bindfs losetup qemu-nbd lxc-attach lxc-config
lxc-create lxc-destroy lxc-info lxc-ls lxc-start lxc-stop
lxc-console e2fsck resize2fs xfs_growfs rbd-nbd
]
}
end

View File

@ -0,0 +1,244 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift "#{File.dirname(__FILE__)}/../../vmm/lxc/"
require 'json'
require 'base64'
require 'client'
require_relative 'process_list'
require_relative 'domain'
#-------------------------------------------------------------------------------
# Extends ProcessList module defined at process_list.rb
#-------------------------------------------------------------------------------
module ProcessList
# Number of seconds to average process usage
AVERAGE_SECS = 1
def self.retrieve_names
LXCClient.new.list
end
# List of processes indexed by uuid; each entry contains:
# :pid
# :memory
# :cpu
def self.process_list
procs = {}
client = LXCClient.new
client.list.each do |container|
# sudo lxc-info -SH container
info = client.info(container, { :H => nil })
next if info['State'].downcase == 'stopped'
procs[container] = {
:pid => info['PID'],
:memory => Integer(info['Memory use']) / 1024,
:cpu => usage_cpu(container)
}
end
procs
end
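# Estimate the CPU usage (%) of a container by sampling jiffies: the
# container cpuacct.stat and the host /proc/stat are read twice, one second
# apart, and the container's share of the elapsed jiffies is scaled by
# (number of cores * 100). For example, on a 4-core host a container that
# consumed half of the elapsed jiffies reports 200.0.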
def self.usage_cpu(container)
multiplier = `grep -c processor /proc/cpuinfo`.to_i * 100
cpuj0 = Jiffies.cpu
container_cpuj0 = Jiffies.process(container)
sleep 1 # measure diff
cpuj1 = Jiffies.cpu - cpuj0
container_cpuj1 = (Jiffies.process(container) - container_cpuj0)
((container_cpuj1 / cpuj1) * multiplier).round(2)
end
# --------------------------------------------------------------------------
# Compute process and total system jiffies
# --------------------------------------------------------------------------
module Jiffies
def self.process(container)
jiffies = 0
path = '/sys/fs/cgroup/cpu,cpuacct/lxc.payload.' \
"#{container}/cpuacct.stat"
begin
stat = File.read(path)
rescue StandardError
return 0
end
stat.lines.each {|line| jiffies += line.split(' ')[1].to_i }
jiffies.to_f
end
def self.cpu
begin
stat = File.read('/proc/stat')
rescue StandardError
return 0
end
jiffies = 0
# skip cpu string and guest jiffies
stat.lines.first.split(' ')[1..-3].each do |num|
jiffies += num.to_i
end
jiffies
end
end
end
#-------------------------------------------------------------------------------
# This class represents an LXC domain, information includes:
# @vm[:name]
# @vm[:id] from one-<id>
# @vm[:uuid] (deployment id)
# @vm[:deploy_id] (deployment id)
# @vm[:lxc_state] LXC state
# @vm[:state] OpenNebula state
# @vm[:netrx]
# @vm[:nettx]
# @vm[:diskrdbytes]
# @vm[:diskwrbytes]
# @vm[:diskrdiops]
# @vm[:diskwriops]
#
# This class uses the LXCClient and ProcessList interface
#-------------------------------------------------------------------------------
class Domain < BaseDomain
# Gets the information of the domain and fills the @vm hash from the
# lxc-info output (LXCClient#info)
def info
client = LXCClient.new
# Get the container information from lxc-info
hash = client.info(@name, { :H => nil })
return -1 if hash.nil?
@vm[:name] = @name
@vm[:uuid] = @name
@vm[:deploy_id] = @name
m = @vm[:name].match(/^one-(\d*)$/)
if m
@vm[:id] = m[1]
else
@vm[:id] = -1
end
@vm[:lxc_state] = hash['State']
state = STATE_MAP[hash['State']] || 'UNKNOWN'
@vm[:state] = state
io_stats(hash)
end
private
# --------------------------------------------------------------------------
# LXC states for the guest are
# * 'STOPPED'
# * 'STARTING'
# * 'RUNNING'
# * 'ABORTING'
# * 'STOPPING'
# https://linuxcontainers.org/lxc/manpages/man7/lxc.7.html
# --------------------------------------------------------------------------
STATE_MAP = {
'STOPPED' => 'POWEROFF', # Transitory state to RUNNING or POWEROFF
'STARTING' => 'RUNNING',
'RUNNING' => 'RUNNING',
'ABORTING' => 'FAILURE',
'STOPPING' => 'RUNNING' # Transitory state to POWEROFF
}
# Get the I/O stats of the domain as reported by lxc-info.
# The network metrics are aggregated across all NICs
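# Note: with several NICs lxc-info repeats the "RX bytes"/"TX bytes" lines
# and LXCClient#hash_out collects the repeats into an Array, hence the
# Array() wrapping and the summation below.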
def io_stats(domain_info)
@vm[:netrx] = 0
@vm[:nettx] = 0
return if @vm[:state] != 'RUNNING'
# Add RX bytes of every NIC
Array(domain_info['RX bytes']).each do |i|
@vm[:netrx] += Integer(i)
end
Array(domain_info['TX bytes']).each do |i|
@vm[:nettx] += Integer(i)
end
end
end
#-------------------------------------------------------------------------------
# This module provides a basic interface to get the list of domains in
# the system and convert the information to be added to monitor or system
# messages.
#
# It also gathers the state information of the domains for the state probe
#-------------------------------------------------------------------------------
module DomainList
############################################################################
# Module Interface
############################################################################
def self.info
domains = LXCDomains.new
domains.info
domains.to_monitor
end
def self.state_info(_host, _host_id)
domains = LXCDomains.new
domains.state_info
end
############################################################################
# This is the implementation class for the module logic
############################################################################
class LXCDomains < BaseDomains
include ProcessList
end
end

View File

@ -0,0 +1 @@
../../../common.d/date.sh

View File

@ -0,0 +1,29 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
(
[ -f /tmp/one-monitord-client.pid ] || exit 0
running_pid=$(cat /tmp/one-monitord-client.pid)
pids=$(ps axuwww | grep -e "/monitord-client.rb lxc" | grep -v grep | awk '{ print $2 }' | grep -v "^${running_pid}$")
if [ -n "$pids" ]; then
kill -6 $pids
fi
) > /dev/null

View File

@ -0,0 +1,21 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require_relative '../../../lib/linux'
LinuxHost.usage('lxc')

View File

@ -0,0 +1 @@
../../../node-probes.d/numa_usage.rb

View File

@ -0,0 +1 @@
../../../node-probes.d/architecture.sh

View File

@ -0,0 +1 @@
../../../node-probes.d/cpu.sh

View File

@ -0,0 +1,21 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require_relative '../../../lib/linux'
LinuxHost.config('lxc')

View File

@ -0,0 +1 @@
../../../node-probes.d/monitor_ds.rb

View File

@ -0,0 +1 @@
../../../node-probes.d/name.sh

View File

@ -0,0 +1 @@
../../../node-probes.d/numa_host.rb

View File

@ -0,0 +1 @@
../../../common.d/version.sh

View File

@ -0,0 +1 @@
../../../node-probes.d/monitor_ds_vm.rb

View File

@ -0,0 +1,5 @@
#!/usr/bin/ruby
require_relative '../../../lib/lxc'
puts DomainList.info

View File

@ -0,0 +1,28 @@
#!/usr/bin/ruby
require_relative '../../../lib/probe_db'
require_relative '../../../lib/lxc'
xml_txt = STDIN.read
host = ARGV[-1]
host_id = ARGV[-2]
begin
config = REXML::Document.new(xml_txt).root
sync = config.elements['PROBES_PERIOD/SYNC_STATE_VM'].text.to_i
rescue StandardError
sync = 180
end
begin
vmdb = VirtualMachineDB.new('lxc',
host,
host_id,
:missing_state => 'POWEROFF',
:sync => sync)
vmdb.purge
puts vmdb.to_status
rescue StandardError => e
puts e
end

View File

@ -0,0 +1 @@
../common.d/monitord-client.rb

View File

@ -0,0 +1 @@
../common.d/monitord-client_control.sh

View File

@ -18,7 +18,7 @@
require_relative '../../../lib/numa_common'
# This module extracts NUMA information from a host for firecracker
# This module extracts NUMA information from a host
module NUMA
def self.node_to_template(node, nid)

View File

@ -24,8 +24,7 @@ module LXDMarket
# TODO: Make configurable
def template
unindent(<<-EOS)
HYPERVISOR = \"lxd\"
SCHED_REQUIREMENTS = \"HYPERVISOR=\\\"lxd\\\"\"
SCHED_REQUIREMENTS = \"HYPERVISOR=\\\"lx*\\\"\"
CPU = \"1\"
MEMORY = \"768\"
LXD_SECURITY_PRIVILEGED = \"true\"

View File

@ -164,6 +164,21 @@ IM_MAD = [
# -t number of threads, i.e. number of hosts monitored at the same time
# -w Timeout in seconds to execute external commands (default unlimited)
#-------------------------------------------------------------------------------
IM_MAD = [
NAME = "lxc",
SUNSTONE_NAME = "LXC",
EXECUTABLE = "one_im_ssh",
ARGUMENTS = "-r 3 -t 15 -w 90 lxc",
THREADS = 0
]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Firecracker UDP-push Information Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
# -w Timeout in seconds to execute external commands (default unlimited)
#-------------------------------------------------------------------------------
IM_MAD = [
NAME = "firecracker",
SUNSTONE_NAME = "Firecracker",

View File

@ -322,5 +322,7 @@ module OpenNebula
def is_paginated?
PAGINATED_POOLS.include?(@pool_name)
end
end
end

View File

@ -37,9 +37,6 @@ microvm = MicroVM.new_from_xml(xml, nil)
# Clean in case the microVM was shutdown from inside
microvm.clean
microvm.gen_deployment_file
microvm.gen_logs_files
# Create microVM
rc = microvm.create

View File

@ -41,6 +41,7 @@ module Command
execute(cmd, lock) unless running?(cmd.split[0])
end
# Returns true/false if status is 0/!=0 and logs error if needed
def self.execute_rc_log(cmd, lock = false)
rc, _stdout, stderr = execute(cmd, lock)
@ -49,6 +50,15 @@ module Command
rc.zero?
end
# Executes cmd, logs stderr if it fails and returns [rc, stdout, stderr]
def self.execute_log(cmd, lock = false)
rc = execute(cmd, lock)
STDERR.puts rc[2] unless rc[0].zero?
rc
end
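# Illustrative usage (hypothetical command strings): execute_rc_log is for
# callers that only need success/failure, execute_log when the output is
# also needed, e.g.
#   Command.execute_rc_log("lxc-start -n 'one-0'")     #=> true or false
#   rc, stdout, stderr = Command.execute_log("lxc-ls")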
def self.execute_detach(cmd)
pid = Process.fork do
exec(cmd)

View File

@ -27,31 +27,23 @@ class MicroVM
# rubocop:disable Naming/AccessorMethodName
# rubocop:disable Layout/LineLength
#---------------------------------------------------------------------------
# List of commands executed by the driver.
#---------------------------------------------------------------------------
COMMANDS = {
:clean => 'sudo -n /usr/sbin/one-clean-firecracker-domain',
:map_context => '/var/tmp/one/vmm/firecracker/map_context',
:prepare_domain => 'sudo -n /usr/sbin/one-prepare-firecracker-domain'
}
#---------------------------------------------------------------------------
# Class constructors & static methods
#---------------------------------------------------------------------------
# Creates the microVM object in memory.
# Can be later created in Firecracker using create method
def initialize(fc, one, client)
#
# @param [FirecrackerVM] object containing ONE VM information (XML)
# @param [FirecrackerClient] client to interact with Firecracker API
def initialize(one, client)
@one = one
@client = client
@fc = fc
@one = one
@jailer_command = 'sudo -n jailer'
@vnc_command = 'screen -x'
# Location for mapping the context
@map_location = "#{@one.sysds_path}/#{@one.vm_id}/map_context"
@map_location = "#{@one.location}/map_context"
return if @one.nil?
@ -63,158 +55,13 @@ class MicroVM
# Creates microVM from a OpenNebula VM xml description
def new_from_xml(one_xml, client)
one = OpenNebulaVM.new(one_xml)
one = FirecrackerVM.new(one_xml)
MicroVM.new(one.to_fc, one, client)
MicroVM.new(one, client)
end
end
#---------------------------------------------------------------------------
# Utils
#---------------------------------------------------------------------------
def gen_deployment_file
File.open("#{vm_location}/deployment.file", 'w+') do |file|
file.write(@fc['deployment-file'].to_json)
end
end
def gen_logs_files
path_log = "#{vm_location}/#{@fc['deployment-file']['logger']['log_fifo']}"
path_metrics = "#{vm_location}/#{@fc['deployment-file']['logger']['metrics_fifo']}"
File.open(path_log, 'w')
File.open(path_metrics, 'w')
end
def vm_location
"#{@one.sysds_path}/#{@one.vm_id}"
end
def get_pid
rc, stdout, = Command.execute('ps auxwww | grep ' \
"\"^.*firecracker.*--id['\\\"=[[:space:]]]*#{@one.vm_name}\" " \
'| grep -v grep', false)
if !rc.zero? || stdout.nil?
return -1
end
Integer(stdout.split[1])
end
def map_context
context = {}
# retrieve context information
@one.context(context)
return 0 unless context['context'] # return if there is no context
context_location = context['context']['source']
params = " #{context_location} #{context_location}"
cmd = "#{COMMANDS[:map_context]} #{params}"
Command.execute_rc_log(cmd, false)
end
def wait_shutdown
t_start = Time.now
timeout = @one.fcrc[:shutdown_timeout]
next while (Time.now - t_start < timeout) && (get_pid > 0)
get_pid < 0
end
def wait_deploy
t_start = Time.now
timeout = 5
next while (Time.now - t_start < timeout) && (get_pid < 0)
get_pid > 0
end
# rubocop:disable Lint/RedundantCopDisableDirective
# rubocop:disable Lint/SuppressedException
def wait_cgroup(path)
t_start = Time.now
timeout = @one.fcrc[:cgroup_delete_timeout]
next while !File.read(path).empty? && (Time.now - t_start < timeout)
File.read(path).empty?
rescue Errno::ENOENT
end
# rubocop:enable Lint/SuppressedException
# rubocop:enable Lint/RedundantCopDisableDirective
def cpu_shares(cpu)
# default value for cpu.shares
default_value = 1024
shares_enabled = @one.fcrc[:cgroup_cpu_shares] == true
return default_value if !shares_enabled || cpu.nil? || cpu == ''
shares_val = (cpu * default_value).round
# The value specified in the cpu.shares file must be 2 or higher.
shares_val = 2 if shares_val < 2
shares_val
end
def prepare_domain
cgroup_path = @one.fcrc[:cgroup_location]
cpu_val = cpu_shares(@one.get_cpu)
params = "-c #{cgroup_path} -p #{cpu_val} -s #{@one.sysds_path}"\
" -v #{@one.vm_id}"
cmd = "#{COMMANDS[:prepare_domain]} #{params}"
Command.execute_rc_log(cmd)
end
#---------------------------------------------------------------------------
# VNC
#---------------------------------------------------------------------------
# Start the svncterm server if it is down.
def vnc(signal)
command = @one.vnc_command(signal, @vnc_command)
return if command.nil?
w = @one.fcrc[:vnc][:width]
h = @one.fcrc[:vnc][:height]
t = @one.fcrc[:vnc][:timeout]
vnc_args = "-w #{w} -h #{h} -t #{t}"
pipe = '/tmp/svncterm_server_pipe'
bin = 'svncterm_server'
server = "#{bin} #{vnc_args}"
rc, _o, e = Command.execute_once(server, true)
unless [nil, 0].include?(rc)
OpenNebula.log_error("#{__method__}: #{e}\nFailed to start vnc")
return
end
lfd = Command.lock
File.open(pipe, 'a') do |f|
f.write command
end
ensure
Command.unlock(lfd) if lfd
end
#---------------------------------------------------------------------------
# Container Management & Monitor
#---------------------------------------------------------------------------
@ -222,6 +69,7 @@ class MicroVM
# Create a microVM
def create
cmd = ''
cmd_params = @one.command_params
# TODO: make screen options configurable to support different versions
# TODO: make screen configurable to enable use of tmux etc.
@ -232,16 +80,24 @@ class MicroVM
# Build jailer command params
cmd << @jailer_command
@fc['command-params']['jailer'].each do |key, val|
cmd_params['jailer'].each do |key, val|
cmd << " --#{key} #{val}"
end
# Build firecracker params
cmd << ' --'
@fc['command-params']['firecracker'].each do |key, val|
cmd_params['firecracker'].each do |key, val|
cmd << " --#{key} #{val}"
end
# Generate files required for the microVM
File.open("#{@one.location}/deployment.file", 'w+') do |file|
file.write(@one.to_fc)
end
File.open(@one.log_path, 'w') {}
File.open(@one.metrics_path, 'w') {}
return false unless prepare_domain
return false unless map_context
@ -282,6 +138,110 @@ class MicroVM
Command.execute_rc_log(cmd, false)
end
#---------------------------------------------------------------------------
# VNC
#---------------------------------------------------------------------------
def vnc(signal)
@one.vnc(signal, @vnc_command, @one.fcrc[:vnc])
end
#---------------------------------------------------------------------------
# Utils
#---------------------------------------------------------------------------
def wait_shutdown
t_start = Time.now
timeout = @one.fcrc[:shutdown_timeout]
next while (Time.now - t_start < timeout) && (get_pid > 0)
get_pid < 0
end
def wait_deploy
t_start = Time.now
timeout = 5
next while (Time.now - t_start < timeout) && (get_pid < 0)
get_pid > 0
end
private
#---------------------------------------------------------------------------
# List of commands executed by the driver.
#---------------------------------------------------------------------------
COMMANDS = {
:clean => 'sudo -n /usr/sbin/one-clean-firecracker-domain',
:map_context => '/var/tmp/one/vmm/firecracker/map_context',
:prepare_domain => 'sudo -n /usr/sbin/one-prepare-firecracker-domain'
}
#---------------------------------------------------------------------------
# Helpers
#---------------------------------------------------------------------------
def get_pid
rc, stdout, = Command.execute('ps auxwww | grep ' \
"\"^.*firecracker.*--id['\\\"=[[:space:]]]*#{@one.vm_name}\" " \
'| grep -v grep', false)
if !rc.zero? || stdout.nil?
return -1
end
Integer(stdout.split[1])
end
def map_context
context = {}
# retrieve context information
@one.context(context)
return 0 unless context['context'] # return if there is no context
context_location = context['context']['source']
params = " #{context_location} #{context_location}"
cmd = "#{COMMANDS[:map_context]} #{params}"
Command.execute_rc_log(cmd, false)
end
# rubocop:disable Lint/RedundantCopDisableDirective
# rubocop:disable Lint/SuppressedException
def wait_cgroup(path)
t_start = Time.now
timeout = @one.fcrc[:cgroup_delete_timeout]
next while !File.read(path).empty? && (Time.now - t_start < timeout)
File.read(path).empty?
rescue Errno::ENOENT
end
# rubocop:enable Lint/SuppressedException
# rubocop:enable Lint/RedundantCopDisableDirective
def prepare_domain
cgroup_path = @one.fcrc[:cgroup_location]
if @one.fcrc[:cgroup_cpu_shares] == true
cpu_val = @one.cpu_shares
else
cpu_val = OpenNebulaVM::CGROUP_DEFAULT_SHARES
end
params = "-c #{cgroup_path} -p #{cpu_val} -s #{@one.sysds_path}"\
" -v #{@one.vm_id}"
cmd = "#{COMMANDS[:prepare_domain]} #{params}"
Command.execute_rc_log(cmd)
end
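# Illustrative resulting command (values depend on fcrc and the VM):
#   sudo -n /usr/sbin/one-prepare-firecracker-domain \
#     -c /sys/fs/cgroup -p 1024 -s /var/lib/one/datastores/0 -v 7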
# rubocop:enable Naming/AccessorMethodName
# rubocop:enable Layout/LineLength

View File

@ -13,11 +13,14 @@
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'rexml/document'
require 'yaml'
require 'command'
# This class reads and holds configuration attributes for the LXD driver
require_relative '../lib/xmlparser'
require_relative '../lib/opennebula_vm'
require_relative '../../scripts_common'
# This class reads and holds configuration attributes for the Firecracker driver
class FirecrackerConfiguration < Hash
DEFAULT_CONFIGURATION = {
@ -53,85 +56,90 @@ class FirecrackerConfiguration < Hash
end
# This class parses and wraps the information in the Driver action data
class OpenNebulaVM
class FirecrackerVM < OpenNebulaVM
# rubocop:disable Naming/PredicateName
# rubocop:disable Naming/AccessorMethodName
attr_reader :xml, :vm_id, :vm_name, :sysds_path, :rootfs_id, :fcrc
attr_reader :fcrc
#---------------------------------------------------------------------------
# Class Constructor
#---------------------------------------------------------------------------
def initialize(xml)
@xml = XMLElement.new_s(xml)
@xml = @xml.element('//VM')
@vm_id = Integer(@xml['//TEMPLATE/VMID'])
@vm_name = @xml['//DEPLOY_ID']
@vm_name = "one-#{@vm_id}" if @vm_name.empty?
# Load Driver configuration
@fcrc = FirecrackerConfiguration.new
sysds_id = @xml['//HISTORY_RECORDS/HISTORY/DS_ID']
@sysds_path = "#{@fcrc[:datastore_location]}/#{sysds_id}"
super(xml, @fcrc)
return if wild?
# Sets the DISK ID of the root filesystem
disk = @xml.element('//TEMPLATE/DISK')
return unless disk
@rootfs_id = disk['DISK_ID']
boot_order = @xml['//TEMPLATE/OS/BOOT']
@rootfs_id = boot_order.split(',')[0][-1] unless boot_order.empty?
@boot_args = @xml['//TEMPLATE/OS/KERNEL_CMD']
@uid = @fcrc[:uid]
@gid = @fcrc[:gid]
@exec_file = @fcrc[:firecracker_location]
end
def has_context?
!@xml['//TEMPLATE/CONTEXT/DISK_ID'].empty?
end
def wild?
@vm_name && !@vm_name.include?('one-')
end
def get_cpu
Float(@xml['//TEMPLATE/CPU'])
end
# Returns a Hash representing the LXC configuration for this OpenNebulaVM
# Returns the Firecracker deployment configuration for this OpenNebulaVM as JSON
def to_fc
fc = {}
fc['name'] = @vm_name
fc['deployment-file'] = {}
fc['deployment-file']['boot-source'] = {}
fc['deployment-file']['drives'] = []
fc['deployment-file']['machine-config'] = {}
fc['deployment-file']['network-interfaces'] = []
fc['command-params'] = {}
fc['boot-source'] = {}
fc['drives'] = []
fc['machine-config'] = {}
fc['network-interfaces'] = []
# Set logger info
fc['deployment-file']['logger'] = {}
fc['deployment-file']['logger']['log_fifo'] = 'logs.fifo'
fc['deployment-file']['logger']['metrics_fifo'] = 'metrics.fifo'
fc['logger'] = {}
fc['logger']['log_fifo'] = LOG_FILE
fc['logger']['metrics_fifo'] = METRICS_FILE
boot_source(fc['deployment-file']['boot-source'])
drives(fc['deployment-file']['drives'])
machine_config(fc['deployment-file']['machine-config'])
nic(fc['deployment-file']['network-interfaces'])
command_params(fc['command-params'])
boot_source(fc['boot-source'])
drives(fc['drives'])
machine_config(fc['machine-config'])
nic(fc['network-interfaces'])
fc
fc.to_json
end
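# Illustrative shape of the generated deployment JSON (values depend on the
# VM template):
#   { "name": "one-7", "boot-source": {...}, "drives": [...],
#     "machine-config": {...}, "network-interfaces": [...],
#     "logger": { "log_fifo": "logs.fifo", "metrics_fifo": "metrics.fifo" } }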
def command_params
hash = {}
hash['jailer'] = {}
hash['firecracker'] = {}
jailer_params(hash['jailer'])
firecracker_params(hash['firecracker'])
hash
end
def context(hash)
cid = @xml['//TEMPLATE/CONTEXT/DISK_ID']
return if cid.empty?
source = disk_location(cid)
hash['context'] = {
'type' => 'disk',
'source' => source,
'path' => '/context',
'disk_id' => cid
}
end
def log_path
"#{location}/#{LOG_FILE}"
end
def metrics_path
"#{location}/#{METRICS_FILE}"
end
private
LOG_FILE = 'logs.fifo'
METRICS_FILE = 'metrics.fifo'
#---------------------------------------------------------------------------
# MicroVM Attribute Mapping
#---------------------------------------------------------------------------
@ -155,14 +163,6 @@ class OpenNebulaVM
hash['ht_enabled'] = !(ht.nil? || ht.empty? || Integer(ht.to_s) <= 1)
end
def command_params(hash)
hash['jailer'] = {}
hash['firecracker'] = {}
jailer_params(hash['jailer'])
firecracker_params(hash['firecracker'])
end
def jailer_params(hash)
hash['id'] = "one-#{vm_id}"
hash['node'] = get_numa_node
@ -180,10 +180,6 @@ class OpenNebulaVM
# MicroVM Device Mapping: Networking
#---------------------------------------------------------------------------
def get_nics
@xml.elements('//TEMPLATE/NIC')
end
def nic(array)
get_nics.each do |n|
eth = {
@ -229,31 +225,6 @@ class OpenNebulaVM
array << drive
end
def get_disks
@xml.elements('//TEMPLATE/DISK')
end
def context(hash)
cid = @xml['//TEMPLATE/CONTEXT/DISK_ID']
return if cid.empty?
source = disk_location(cid)
hash['context'] = {
'type' => 'disk',
'source' => source,
'path' => '/context',
'disk_id' => cid
}
end
def disk_location(disk_id)
datastore = @sysds_path
datastore = File.readlink(@sysds_path) if File.symlink?(@sysds_path)
"#{datastore}/#{@vm_id}/disk.#{disk_id}"
end
#---------------------------------------------------------------------------
# MicroVM Device Mapping: NUMA
#---------------------------------------------------------------------------
@ -283,82 +254,8 @@ class OpenNebulaVM
def random_policy(nodes)
Integer(nodes.sample.gsub('node', ''))
end
#---------------------------------------------------------------------------
# Container Mapping: Extra Configuration & Profiles
#---------------------------------------------------------------------------
# Creates microVM vnc connection
# Creates or closes a connection to a microVM rfb port depending on signal
def vnc_command(signal, vnc_command)
data = @xml.element('//TEMPLATE/GRAPHICS')
return unless data && data['TYPE'].casecmp('vnc').zero?
pass = data['PASSWD']
pass = '-' if pass.empty?
case signal
when 'start'
"#{data['PORT']} #{pass} #{vnc_command} #{@vm_name}\n"
when 'stop'
"-#{data['PORT']}\n"
end
end
def vnc?
data = @xml.element('//TEMPLATE/GRAPHICS')
data && data['TYPE'].casecmp('vnc').zero?
end
# rubocop:enable Naming/PredicateName
# rubocop:enable Naming/AccessorMethodName
end
# This class abstracts the access to XML elements. It provides basic methods
# to get elements by their xpath
class XMLElement
def initialize(xml)
@xml = xml
end
# Create a new XMLElement using a xml document in a string
def self.new_s(xml_s)
xml = nil
xml = REXML::Document.new(xml_s).root unless xml_s.empty?
new(xml)
end
# Gets the text associated to a th element. The element is select by
# its xpath. If not found an empty string is returned
def [](key)
element = @xml.elements[key.to_s]
return '' if (element && !element.has_text?) || !element
element.text
end
# Return an XMLElement for the given xpath
def element(key)
e = @xml.elements[key.to_s]
element = nil
element = XMLElement.new(e) if e
element
end
# Get elements by xpath. This function returns an Array of XMLElements
def elements(key)
collection = []
@xml.elements.each(key) do |pelem|
collection << XMLElement.new(pelem)
end
collection
end
end

View File

@ -0,0 +1,146 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'command'
# LXC Cli Client. Runs LXC commands listed in COMMANDS and returns their
# output in convenient data structures
class LXCClient
# LXC CLI Commands
COMMANDS = {
:attach => 'sudo lxc-attach',
:config => 'sudo lxc-config',
:console => 'sudo lxc-console',
:create => 'sudo lxc-create',
:destroy => 'sudo lxc-destroy',
:info => 'sudo lxc-info',
:ls => 'sudo lxc-ls',
:start => 'sudo lxc-start',
:stop => 'sudo lxc-stop'
}
# Returns LXC version
def version
_, rc, = Command.execute_log("#{COMMANDS[:ls]} --version")
rc
end
#-----------------------------------------------------------------------
# Life Cycle Operations
#-----------------------------------------------------------------------
def start(name, options = {})
cmd = append_options("#{COMMANDS[:start]} -n '#{name}'", options)
Command.execute_rc_log(cmd)
end
def stop(name, options = {})
cmd = append_options("#{COMMANDS[:stop]} -n '#{name}'", options)
Command.execute_rc_log(cmd)
end
def create(name, options = {})
options[:template] ||= 'none' # Template is mandatory
cmd = append_options("#{COMMANDS[:create]} -n '#{name}'", options)
Command.execute_rc_log(cmd)
end
def destroy(name, options = {})
cmd = append_options("#{COMMANDS[:destroy]} -n '#{name}'", options)
Command.execute_rc_log(cmd)
end
#-----------------------------------------------------------------------
# Monitoring
#-----------------------------------------------------------------------
# Creates a hash from lxc-info
# Example output of the command
# Name: one-lxc
# State: RUNNING
# PID: 3706092
# CPU use: 0.33 seconds
# BlkIO use: 6.06 MiB
# Memory use: 10.19 MiB
# KMem use: 3.13 MiB
def info(name, options = {})
cmd = append_options("#{COMMANDS[:info]} -n '#{name}'", options)
rc, stdout, = Command.execute_log(cmd)
if rc == false
return
end
hash_out(stdout)
end
# Returns the list of containers
def list(options = {})
cmd = append_options(COMMANDS[:ls], options)
rc, stdout, = Command.execute_log(cmd)
if rc == false
return
end
stdout.split
end
private
# Appends options to the cmd string. Option names should match existing
# command options. If an option has no value it should look like
# {:option => nil}
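# e.g. append_options("sudo lxc-stop -n 'one-0'", { :kill => nil })
#   #=> "sudo lxc-stop -n 'one-0' --kill"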
def append_options(cmd, options)
rc = cmd
options.each do |opt, val|
if opt.size == 1
rc << " -#{opt}"
else
rc << " --#{opt}"
end
rc << " #{val}" if val
end
rc
end
# Parses lxc-info style output into a hash (repeated keys become Arrays)
def hash_out(output)
hash = {}
output.split("\n").each do |element|
element.strip!
info = element.split(/:\s+/)
if hash[info[0]]
hash[info[0]] = Array(hash[info[0]])
hash[info[0]] << info[1]
else
hash[info[0]] = info[1]
end
end
hash
end
end
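A minimal usage sketch of the client, assuming a container named one-0 already exists; every call shells out via sudo to the lxc-* binaries listed in COMMANDS:
client = LXCClient.new
client.list                                   # e.g. ["one-0", "one-7"]
client.info('one-0', { :H => nil })['State']  # e.g. "RUNNING"
client.start('one-0')                         # true on success
client.stop('one-0', { :kill => nil })        # runs lxc-stop ... --kill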

View File

@ -0,0 +1,27 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require_relative '../lib/command'
# This module can be used to execute commands. It wraps popen3 and provides
# locking capabilities using flock
module Command
LOCK_FILE = '/tmp/onelxc-lock'
end

View File

@ -0,0 +1,164 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'client'
require 'opennebula_vm'
# LXC Container abstraction. Handles native and driver-added container
# operations. Allows gathering container config and status data
class Container
#-----------------------------------------------------------------------
# Class constructors & static methods
#-----------------------------------------------------------------------
# @param [LXCVM] ONE VM information (XML), LXC specialization
# @param [LXCClient] client to interact with LXC (command line)
def initialize(one, client)
@one = one
@client = client
end
class << self
# Creates container from a OpenNebula VM xml description
def new_from_xml(one_xml, client)
one = LXCVM.new(one_xml)
Container.new(one, client)
end
end
#-----------------------------------------------------------------------
# Life Cycle Operations
#-----------------------------------------------------------------------
# Creates container in Linux
def create(options = {})
options[:config] = "#{@one.location}/deployment.file"
File.open(options[:config], 'w+') do |file|
file.write(@one.to_lxc)
end
# Map storage
error = false
mounted = []
@one.disks.each do |disk|
if disk.mount(@one.lxcrc)
mounted << disk
else
error = true
break
end
end
# Rollback if error
if error
mounted.each {|d| d.umount }
return false
end
@client.create(@one.vm_name, options)
end
# Remove container in Linux
def cancel
options = { :kill => nil }
rc = @client.stop(@one.vm_name, options)
return false unless rc
# Unmount disks, remove folders and destroy the container
clean
end
def start
rc = @client.start(@one.vm_name)
# Clean if container fails to start
if !rc
clean(true) # Unmount disks, remove folders and destroy the container
return false
end
wait_deploy(5)
end
def shutdown
rc = @client.stop(@one.vm_name)
return false unless rc
# Unmount disks, remove folders and destroy the container
clean
end
def reboot
rc = @client.stop(@one.vm_name)
return false unless rc
@client.start(@one.vm_name)
end
def clean(ignore_err = false)
# Unmap storage
@one.disks.each do |disk|
rc = disk.umount({ :ignore_err => ignore_err })
return false if ignore_err != true && !rc
end
# Clean bindpoint
FileUtils.rm_rf(@one.bind_folder)
# Destroy container
@client.destroy(@one.vm_name)
end
#---------------------------------------------------------------------------
# VNC
#---------------------------------------------------------------------------
def vnc(signal)
@one.vnc(signal, @one.lxcrc[:vnc][:command], @one.lxcrc[:vnc])
end
private
STATES = {
:running => 'RUNNING',
:stopped => 'STOPPED'
}
# Waits for the container to be RUNNING
# @param timeout [Integer] seconds to wait for the container to start
def wait_deploy(timeout)
t_start = Time.now
next while (Time.now - t_start < timeout) &&
(@client.info(@one.vm_name)['State'] != STATES[:running])
@client.info(@one.vm_name)['State'] == STATES[:running]
end
end
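A sketch of how a VMM action script could drive this class, assuming the VM XML arrives on standard input (require paths are illustrative; the real lxc/deploy and lxc/shutdown actions are not shown in this hunk):
require_relative '../lib/lxc/container'
require_relative '../lib/lxc/client'
xml       = STDIN.read
container = Container.new_from_xml(xml, LXCClient.new)
exit(-1) unless container.create   # write deployment.file, map disks, lxc-create
exit(-1) unless container.start    # lxc-start; cleans up if it fails to boot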

View File

@ -0,0 +1,324 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'yaml'
# Storage mappers
require 'storageutils'
require 'qcow2'
require 'raw'
require 'rbd'
require_relative '../lib/xmlparser'
require_relative '../lib/opennebula_vm'
require_relative '../../scripts_common'
# -----------------------------------------------------------------------------
# This class reads and holds configuration attributes for the LXC driver
# -----------------------------------------------------------------------------
class LXCConfiguration < Hash
DEFAULT_CONFIGURATION = {
:vnc => {
:width => '800',
:height => '600',
:timeout => '300',
:command => 'sudo lxc-console'
},
:filesystem => 'ext4',
:datastore_location => '/var/lib/one/datastores',
:default_lxc_config => '/usr/share/lxc/config/common.conf',
:id_map => 600100001, # First id for mapping
:max_map => 65536 # Max id for mapping
}
LXCRC = '../../etc/vmm/lxc/lxcrc'
def initialize
replace(DEFAULT_CONFIGURATION)
begin
merge!(YAML.load_file("#{__dir__}/#{LXCRC}"))
rescue StandardError => e
OpenNebula.log_error e
end
super
end
end
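# Illustrative use: LXCConfiguration.new behaves as a Hash holding
# DEFAULT_CONFIGURATION overridden by the YAML in etc/vmm/lxc/lxcrc, e.g.
#   LXCConfiguration.new[:vnc][:command]  #=> "sudo lxc-console" (default)
#   LXCConfiguration.new[:filesystem]     #=> "ext4" (default)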
# -----------------------------------------------------------------------------
# This class parses and wraps the information in the Driver action data
# -----------------------------------------------------------------------------
class LXCVM < OpenNebulaVM
CONTAINER_FS_PATH = '/var/lib/lxc-one'
attr_reader :lxcrc, :bind_folder
def initialize(xml)
# Load Driver configuration
@lxcrc = LXCConfiguration.new
super(xml, @lxcrc)
@bind_folder = "#{CONTAINER_FS_PATH}/#{@vm_id}"
end
# Returns a Hash representing the LXC configuration for this OpenNebulaVM
def to_lxc
prefix = 'lxc'
lxc = {}
# Always include default file with uids mapping
lxc["#{prefix}.include"] = [@lxcrc[:default_lxc_config]]
# Add disks
disks.each do |disk|
disk.to_lxc.each do |key, val|
if lxc[key].nil?
lxc[key] = [val]
else
lxc[key] << val
end
end
end
# Add nics
get_nics.each_with_index do |nic, i|
lxc["lxc.net.#{i}.type"] = 'veth'
lxc["lxc.net.#{i}.link"] = nic['BRIDGE']
lxc["lxc.net.#{i}.hwaddr"] = nic['MAC']
lxc["lxc.net.#{i}.veth.pair"] = "one-#{@vm_id}-#{nic['NIC_ID']}"
end
# Add cgroup limitations
# rubocop:disable Layout/LineLength
lxc['lxc.cgroup.cpu.shares'] = cpu_shares
lxc['lxc.cgroup.memory.limit_in_bytes'] = memmory_limit_in_bytes
lxc['lxc.cgroup.memory.oom_control'] = 1 # Prevent the OOM killer from killing the process when the limit is reached
# rubocop:enable Layout/LineLength
# User mapping
# rubocop:disable Layout/LineLength
lxc['lxc.idmap'] = ["u 0 #{@lxcrc[:id_map]} #{@lxcrc[:max_map]}",
"g 0 #{@lxcrc[:id_map]} #{@lxcrc[:max_map]}"]
# rubocop:enable Layout/LineLength
hash_to_lxc(lxc)
end
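# Illustrative output for a container with a single NIC (values depend on
# the VM template and lxcrc):
#   lxc.include = '/usr/share/lxc/config/common.conf'
#   lxc.rootfs.path = '/var/lib/lxc-one/7/disk.0'
#   lxc.net.0.type = 'veth'
#   lxc.net.0.link = 'br0'
#   lxc.cgroup.cpu.shares = '1024'
#   lxc.idmap = 'u 0 600100001 65536'
#   lxc.idmap = 'g 0 600100001 65536'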
# Returns an Array of Disk objects, each one represents an OpenNebula DISK
def disks
adisks = []
return adisks if @rootfs_id.nil?
@xml.elements('//TEMPLATE/DISK').each do |xml|
next if xml['TYPE'].downcase == 'swap'
if xml['DISK_ID'] == @rootfs_id
adisks << Disk.new_root(xml, @sysds_path, @vm_id)
else
adisks << Disk.new_disk(xml, @sysds_path, @vm_id)
end
end
context_xml = @xml.element('//TEMPLATE/CONTEXT')
if context_xml
adisks << Disk.new_context(context_xml, @sysds_path, @vm_id)
end
adisks
end
private
# Returns the config in LXC style format
def hash_to_lxc(hash)
lxc = ''
hash.each do |key, value|
# Process duplicate values from array keys
if value.class == Array
value.each do |v|
lxc << "#{key} = '#{v}'\n"
end
else
lxc << "#{key} = '#{value}'\n"
end
end
lxc
end
end
# ------------------------------------------------------------------------------
# Class for managing Container Disks
# ------------------------------------------------------------------------------
class Disk
attr_accessor :id, :vm_id, :sysds_path
class << self
def new_context(xml, sysds_path, vm_id)
Disk.new(xml, sysds_path, vm_id, false, true)
end
def new_root(xml, sysds_path, vm_id)
Disk.new(xml, sysds_path, vm_id, true, false)
end
def new_disk(xml, sysds_path, vm_id)
Disk.new(xml, sysds_path, vm_id, false, false)
end
end
# Mount mapper block device
def mount(options = {})
device = @mapper.map(self)
return false unless device
return true if Storage.setup_disk(device,
@mountpoint,
@bindpoint,
options)
# Rollback if failed to setup disk
@mapper.unmap(device)
false
end
# Umount mapper block device
# If options[:ignore_err] is set, errors won't force an early return
def umount(options = {})
device = find_device
return true if device.empty?
rc = Storage.teardown_disk(@mountpoint, @bindpoint)
return false if !rc && options[:ignore_err] != true
# unmap image
@mapper.unmap(device)
end
# Generate disk into LXC config format
def to_lxc
return { 'lxc.rootfs.path' => @bindpoint } if @is_rootfs
if @is_context
path = 'context'
opts = 'none rbind,ro,create=dir,optional 0 0'
else
# TODO: Adjustable guest mountpoint
path = "media/one-disk.#{@id}"
opts = 'none rbind,create=dir,optional 0 0'
end
{ 'lxc.mount.entry' => "#{@bindpoint} #{path} #{opts}" }
end
def swap?
@xml['TYPE'] == 'swap'
end
def volatile?
@xml['TYPE'] == 'fs'
end
# Access Disk attributes
def [](k)
@xml[k]
end
private
def initialize(xml, sysds_path, vm_id, is_rootfs, is_context)
@xml = xml
@vm_id = vm_id
@id = @xml['DISK_ID'].to_i
@sysds_path = sysds_path
@is_rootfs = is_rootfs
@is_context = is_context
# rubocop:disable all
@mapper = if @xml['FORMAT'].downcase == 'qcow2'
Qcow2Mapper.new
elsif @xml['DISK_TYPE'].downcase == 'rbd'
RBDMapper.new(self)
else
RawMapper.new
end
# rubocop:enable all
datastore = @sysds_path
datastore = File.readlink(@sysds_path) if File.symlink?(@sysds_path)
@mountpoint = "#{datastore}/#{@vm_id}/mapper/disk.#{@id}"
@bindpoint = "#{LXCVM::CONTAINER_FS_PATH}/#{@vm_id}/disk.#{@id}"
end
# Returns the associated linux device for the mountpoint
def find_device
sys_parts = Storage.lsblk('')
device = ''
real_path = @mountpoint
is_shared_ds = File.symlink?(@sysds_path)
real_path = File.realpath(real_path) if !@is_rootfs && is_shared_ds
sys_parts.each do |d|
if d['mountpoint'] == real_path
device = d['path']
break
end
if d['children']
d['children'].each do |c|
next unless c['mountpoint'] == real_path
device = d['path']
break
end
end
break unless device.empty?
end
if device.empty?
OpenNebula.log_error("Cannot detect block device from #{real_path}")
end
device
end
end
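# Illustrative disk lifecycle (a sketch; `vm` stands for an LXCVM built from
# the driver action XML, and the paths mentioned are hypothetical):
#
#   disk = vm.disks.first
#
#   # map (losetup/qemu-nbd/rbd-nbd), mount under .../<vm_id>/mapper/disk.<id>
#   # and bindfs it into /var/lib/lxc-one/<vm_id>/disk.<id>
#   disk.mount(:id_map => vm.lxcrc[:id_map])
#
#   # later, on shutdown/cancel
#   disk.umount(:ignore_err => true)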

View File

@ -0,0 +1,112 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'command'
require 'storageutils'
# Mapper for qcow2 images (qemu-nbd)
class Qcow2Mapper
QEMU_NBD_FORK_VERSION = '2.8.0'
COMMANDS = {
:map => 'sudo -n qemu-nbd --fork -c',
:unmap => 'sudo -n qemu-nbd -d'
}
# Maps a qcow2 image file to a Linux nbd device
def map(disk)
file = source(disk)
device = nbd_device.strip
cmd = "#{COMMANDS[:map]} #{device} #{file}"
rc = Command.execute_rc_log(cmd, false)
return unless rc
device
end
# Unmaps the nbd device
def unmap(device)
Command.execute_rc_log("#{COMMANDS[:unmap]} #{device}", false)
end
private
# Returns the source file for the disk
def source(disk)
"#{disk.sysds_path}/#{disk.vm_id}/disk.#{disk.id}"
end
# TODO, check if necessary
def fork_supported
tgt_ver = nbd_version
return false if tgt_ver == '0.0.0'
Gem::Version.new(tgt_ver) >= Gem::Version.new(QEMU_NBD_FORK_VERSION)
end
def nbd_version
# NOTE: COMMANDS has no :nbd entry and @commands is never set; query the binary directly
cmd = 'qemu-nbd -V'
rc, out, _err = Command.execute(cmd, false)
return '0.0.0' unless rc.zero?
match_v = out.match(/qemu-nbd(?: version)? ((?:[0-9]+\.?)+)\s?\(?.*$/)
return '0.0.0' if match_v.nil?
match_v[1]
end
# Detects the maximum number of nbd block devices
def nbds_max
File.read('/sys/module/nbd/parameters/nbds_max').chomp.to_i
rescue StandardError => e
STDERR.puts("Cannot load kernel module parameter\n#{e}")
0
end
# Returns the first unused nbd device
def nbd_device
sys_parts = Storage.lsblk('')
nbds = []
sys_parts.each do |p|
m = p['name'].match(/nbd(\d+)/)
next unless m
nbds << m[1].to_i
end
nbds_max.times do |i| # if nbds_max returns 0 block is skipped
return "/dev/nbd#{i}" unless nbds.include?(i)
end
STDERR.puts("#{__method__}: Cannot find free nbd device")
''
end
end
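# Illustrative usage (a sketch; `disk` stands for a Disk object from
# opennebula_vm.rb and the device name is an example):
#
#   mapper = Qcow2Mapper.new
#   device = mapper.map(disk)   #=> e.g. "/dev/nbd0" on success, nil otherwise
#   ...
#   mapper.unmap(device) if device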

View File

@ -0,0 +1,45 @@
#!/usr/bin/ruby
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'command'
# TODO: Multipart
# Mapping utilities for loop devices
class RawMapper
COMMANDS = {
:map => 'sudo -n losetup --find --show',
:unmap => 'sudo -n losetup -d'
}
# Maps a raw image file to a Linux loop device
def map(disk)
file = source(disk)
cmd = "#{COMMANDS[:map]} #{file}"
rc, stdout, = Command.execute_log(cmd, false)
if rc == 0
return stdout.strip # device used, ex. /dev/loop0
end
nil
end
# Unmaps loopdevice from Linux
def unmap(device)
cmd = "#{COMMANDS[:unmap]} #{device}"
Command.execute_rc_log(cmd)
end
private
# Returns the source file for the disk
def source(disk)
"#{disk.sysds_path}/#{disk.vm_id}/disk.#{disk.id}"
end
end

View File

@ -0,0 +1,81 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'command'
require 'storageutils'
# ------------------------------------------------------------------------------
# Ceph RBD mapper it uses rbd-nbd to map a rbd image to a nbd (Network Block
# Device) device.
# ------------------------------------------------------------------------------
class RBDMapper
COMMANDS = {
:rbd => 'sudo -n rbd-nbd'
}
def initialize(disk)
@map_cmd = "#{COMMANDS[:rbd]} --id #{disk['CEPH_USER']} map"
@unmap_cmd = "#{COMMANDS[:rbd]} --id #{disk['CEPH_USER']} unmap"
end
# @return [String, nil] mapped device or nil in case of error
def map(disk)
dsrc = source(disk)
return unless dsrc
rc, out, err = Command.execute("#{@map_cmd} #{dsrc}", true)
unless rc.zero?
OpenNebula.log_error("#{__method__}: #{err}")
return
end
out.chomp
end
# @return [Bool] true if the command succeeded
def unmap(device)
Command.execute_rc_log("#{@unmap_cmd} #{device}", false)
end
private
# Creates the rbd name. Name structure defined in tm/ceph
# - non-persistent (clone) "${SRC_PATH}-${VM_ID}-${DISK_ID}"
# - persistent (ln)        "${SRC_PATH}"
# - volatile (mkimage) "${POOL_NAME}/one-sys-${VMID}-${DISK_ID}"
def source(disk)
src = disk['SOURCE']
if disk['CLONE'].upcase == 'YES'
src = "#{src}-#{disk.vm_id}-#{disk.id}"
elsif disk.volatile?
src = "#{disk['POOL_NAME']}/one-sys-#{disk.vm_id}-#{disk.id}"
end
src
rescue StandardError
OpenNebula.log_error("#{__method__}: Cannot set disk source")
nil
end
end
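# Illustrative rbd sources built by #source for a hypothetical VM 7 / DISK 2
# (pool and image names are examples only):
#
#   clone      (CLONE=YES)        -> "one/one-123-7-2"
#   persistent (CLONE=NO)         -> "one/one-123"
#   volatile   (TYPE=fs, mkimage) -> "one/one-sys-7-2"
#
#   mapper = RBDMapper.new(disk)
#   device = mapper.map(disk)     #=> e.g. "/dev/nbd1", nil on error
#   mapper.unmap(device) if device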

View File

@ -0,0 +1,208 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
$LOAD_PATH.unshift File.dirname('../lib/')
require 'json'
require 'fileutils'
require 'command'
# Interface to Linux storage management commands
module Storage
PRE = 'sudo -n '
COMMANDS = {
:mount => "#{PRE}mount",
:umount => "#{PRE}umount",
:lsblk => 'lsblk',
:mkdir => 'mkdir -p',
:su_mkdir => "#{PRE}mkdir -p",
:cat => 'cat',
:file => 'file -L -s',
:blkid => "#{PRE}blkid",
:e2fsck => "#{PRE}e2fsck",
:resize2fs => "#{PRE}resize2fs",
:xfs_growfs => "#{PRE}xfs_growfs",
:bind => "#{PRE}bindfs" # TODO: allow customizing the offset
}
def self.setup_disk(device, mountpoint, bindpoint, options)
begin
# Get the device filesystem (e.g. ext4, xfs, ...)
device_fs = device_fs(device)
# Resize device if extX like filesystem is used
if device_fs.match?(/^ext([2-4])$/)
resize_ext(device)
end
# Mount device in mapper folder <sys_ds_folder>/<vm_id>/mapper/disk.id
return false unless mount(device, mountpoint)
# Resize device if xfs like filesystem is used
if device_fs == 'xfs'
resize_xfs(mountpoint)
end
# Bind @mountpoint into the publicly accessible folder (@bindpoint)
return true if bind(mountpoint, bindpoint, options)
rescue StandardError
end
umount(bindpoint)
umount(mountpoint)
false
end
def self.teardown_disk(mountpoint, bindpoint)
# unbind fs
return false unless umount(bindpoint)
# unmount fs
umount(mountpoint)
end
# Get the partitions on the system or device
# @param device [String] to get the partitions from. Use an empty string
# for host partitions
# @return [Hash] with partitions
def self.lsblk(device)
partitions = {}
rc, o, e = Command.execute("#{COMMANDS[:lsblk]} -OJ #{device}", false)
if rc != 0 || o.empty?
STDERR.puts("lsblk: #{e}")
return partitions
end
begin
partitions = JSON.parse(o)['blockdevices']
if !device.empty?
partitions = partitions[0]
if partitions['children']
partitions = partitions['children']
else
partitions = [partitions]
end
partitions.delete_if do |p|
p['fstype'].casecmp('swap').zero? if p['fstype']
end
end
rescue StandardError
STDERR.puts("lsblk: error parsing lsblk -OJ #{device}")
return {}
end
# Fix lsblk paths for versions < 2.33
partitions.each do |p|
lsblk_path(p)
p['children'].each {|q| lsblk_path(q) } if p['children']
end
partitions
end
class << self
private
# Mount device in directory
def mount(device, directory, _options = {})
FileUtils.mkdir_p(directory)
cmd = "#{COMMANDS[:mount]} #{device} #{directory}"
Command.execute_rc_log(cmd)
end
# bind src directory into target
# options:
# - :id_map (Integer): will apply the corresponding offset to UID/GID
def bind(src, target, options = {})
cmd_opts = ''
FileUtils.mkdir_p(target)
# Add offset options
if !options[:id_map].nil?
cmd_opts = "--uid-offset=#{options[:id_map]} "\
"--gid-offset=#{options[:id_map]}"
end
# Bindfs
cmd = "#{COMMANDS[:bind]} #{cmd_opts} #{src} #{target}"
Command.execute_rc_log(cmd)
end
# Umount mountpoint
def umount(mountpoint, _options = {})
cmd = "#{COMMANDS[:umount]} #{mountpoint}"
return false unless Command.execute_rc_log(cmd)
# clean mountpoint
FileUtils.rm_rf(mountpoint)
true
end
# Adds path to the partition Hash. This is needed for lsblk version < 2.33
def lsblk_path(p)
return unless !p['path'] && p['name']
if File.exist?("/dev/#{p['name']}")
p['path'] = "/dev/#{p['name']}"
elsif File.exist?("/dev/mapper/#{p['name']}")
p['path'] = "/dev/mapper/#{p['name']}"
end
end
# Return the FSTYPE of a device or an empty string if not defined
def device_fs(device)
rc, o, = Command.execute_log("#{COMMANDS[:lsblk]} -o NAME,FSTYPE")
return '' unless rc
# Get filesystem type if defined
o.match(/#{device.gsub('/dev/', '')}.*/)[0]
.split[1]
.strip rescue ''
end
# Resize an extX like device
def resize_ext(device)
Command.execute_rc_log("#{COMMANDS[:e2fsck]} -y -f #{device}")
Command.execute_rc_log("#{COMMANDS[:resize2fs]} #{device}")
end
# Resize an xfs like device
def resize_xfs(fs)
Command.execute_rc_log("#{COMMANDS[:xfs_growfs]} #{fs}")
end
end
end
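# Illustrative usage (a sketch; the device and paths are hypothetical
# examples, as returned by one of the mappers and built by the Disk class):
#
#   device     = '/dev/loop3'
#   mountpoint = '/var/lib/one/datastores/0/42/mapper/disk.1'
#   bindpoint  = '/var/lib/lxc-one/42/disk.1'
#
#   Storage.setup_disk(device, mountpoint, bindpoint, :id_map => 600100001)
#   ...
#   Storage.teardown_disk(mountpoint, bindpoint)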

View File

@ -0,0 +1,189 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# This class parses and wraps the information in the Driver action data
class OpenNebulaVM
# rubocop:disable Naming/PredicateName
# rubocop:disable Naming/AccessorMethodName
attr_reader :vm_id, :vm_name, :sysds_path
CGROUP_DEFAULT_SHARES = 1024
#---------------------------------------------------------------------------
# Class Constructor
#---------------------------------------------------------------------------
def initialize(xml, mad_conf)
@xml = XMLElement.new_s(xml)
@xml = @xml.element('//VM')
@vm_id = Integer(@xml['//TEMPLATE/VMID'])
@vm_name = @xml['//DEPLOY_ID']
@vm_name = "one-#{@vm_id}" if @vm_name.empty?
if mad_conf[:datastore_location]
sysds_id = @xml['//HISTORY_RECORDS/HISTORY/DS_ID']
@sysds_path = "#{mad_conf[:datastore_location]}/#{sysds_id}"
end
return if wild?
# Sets the DISK ID of the root filesystem
disk = @xml.element('//TEMPLATE/DISK')
return unless disk
@rootfs_id = disk['DISK_ID']
boot_order = @xml['//TEMPLATE/OS/BOOT']
@rootfs_id = boot_order.split(',')[0][-1] unless boot_order.empty?
end
#---------------------------------------------------------------------------
# Helpers
#---------------------------------------------------------------------------
def has_context?
!@xml['//TEMPLATE/CONTEXT/DISK_ID'].empty?
end
def wild?
@vm_name && !@vm_name.include?('one-')
end
def vnc?
data = @xml.element('//TEMPLATE/GRAPHICS')
data && data['TYPE'].casecmp('vnc').zero?
end
def get_cpu
Float(@xml['//TEMPLATE/CPU'])
end
def get_nics
@xml.elements('//TEMPLATE/NIC')
end
def get_disks
@xml.elements('//TEMPLATE/DISK')
end
def location
"#{sysds_path}/#{vm_id}"
end
def disk_location(disk_id)
datastore = @sysds_path
datastore = File.readlink(@sysds_path) if File.symlink?(@sysds_path)
"#{datastore}/#{@vm_id}/disk.#{disk_id}"
end
# Start/stop the svncterm server.
def vnc(signal, command, options)
command = vnc_command(signal, command)
return if command.nil?
w = options[:width]
h = options[:height]
t = options[:timeout]
vnc_args = "-w #{w} -h #{h} -t #{t}"
pipe = '/tmp/svncterm_server_pipe'
bin = 'svncterm_server'
server = "#{bin} #{vnc_args}"
rc, _o, e = Command.execute_once(server, true)
unless [nil, 0].include?(rc)
OpenNebula.log_error("#{__method__}: #{e}\nFailed to start vnc")
return
end
lfd = Command.lock
File.open(pipe, 'a') do |f|
f.write command
end
ensure
Command.unlock(lfd) if lfd
end
#---------------------------------------------------------------------------
# Cgroups
#---------------------------------------------------------------------------
# Return the value for cpu.shares cgroup based on the value of CPU.
#
# cpu.shares
# contains an integer value that specifies a relative share of CPU time
# available to the tasks in a cgroup. For example, tasks in two cgroups
# that have cpu.shares set to 100 will receive equal CPU time, but tasks
# in a cgroup that has cpu.shares set to 200 receive twice the CPU time of
# tasks in a cgroup where cpu.shares is set to 100. The value specified in
# the cpu.shares file must be 2 or higher.
# (https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu)
def cpu_shares
cpu = get_cpu
return CGROUP_DEFAULT_SHARES if cpu.nil? || cpu == ''
shares_val = (cpu * CGROUP_DEFAULT_SHARES).round
# The value specified in the cpu.shares file must be 2 or higher.
shares_val = 2 if shares_val < 2
shares_val
end
# Return the value for the memory.limit_in_bytes cgroup based on the value of
# MEMORY.
#
# memory.limit_in_bytes
# sets the maximum amount of user memory (including file cache). If no
# units are specified, the value is interpreted as bytes. However, it
# is possible to use suffixes to represent larger units - k or K for
# kilobytes, m or M for megabytes, and g or G for gigabytes.
# (https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-memory)
def memmory_limit_in_bytes
default_units = 'M' # MEMORY units are in MB
"#{@xml['//TEMPLATE/MEMORY']}#{default_units}"
end
private
# Builds the command that opens or closes a connection to the VM RFB port,
# depending on the signal
def vnc_command(signal, vnc_command)
data = @xml.element('//TEMPLATE/GRAPHICS')
return unless data && data['TYPE'].casecmp('vnc').zero?
pass = data['PASSWD']
pass = '-' if pass.empty?
case signal
when 'start'
"#{data['PORT']} #{pass} #{vnc_command} #{@vm_name}\n"
when 'stop'
"-#{data['PORT']}\n"
end
end
# rubocop:enable Naming/PredicateName
# rubocop:enable Naming/AccessorMethodName
end
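# Worked cgroup examples (hypothetical template values, not from this commit):
#
#   CPU    = 0.5   -> cpu_shares             == (0.5 * 1024).round = 512
#   CPU    = 2     -> cpu_shares             == 2048
#   CPU    = 0.001 -> cpu_shares             == 2   (clamped to the minimum)
#   MEMORY = 768   -> memmory_limit_in_bytes == "768M"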

View File

@ -0,0 +1,66 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'rexml/document'
# This class abstracts the access to XML elements. It provides basic methods
# to get elements by their xpath
class XMLElement
def initialize(xml)
@xml = xml
end
# Create a new XMLElement using an xml document in a string
def self.new_s(xml_s)
xml = nil
xml = REXML::Document.new(xml_s).root unless xml_s.empty?
new(xml)
end
# Gets the text associated with an element. The element is selected by
# its xpath. If not found, an empty string is returned
def [](key)
element = @xml.elements[key.to_s]
return '' if (element && !element.has_text?) || !element
element.text
end
# Return an XMLElement for the given xpath
def element(key)
e = @xml.elements[key.to_s]
element = nil
element = XMLElement.new(e) if e
element
end
# Get elements by xpath. This function returns an Array of XMLElements
def elements(key)
collection = []
@xml.elements.each(key) do |pelem|
collection << XMLElement.new(pelem)
end
collection
end
end
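# Illustrative usage (a sketch with a made-up XML document):
#
#   xml = XMLElement.new_s('<VM><TEMPLATE><MEMORY>768</MEMORY></TEMPLATE></VM>')
#
#   xml['//TEMPLATE/MEMORY']        #=> "768"
#   xml['//TEMPLATE/CPU']           #=> ""  (element not present)
#   xml.elements('//TEMPLATE/DISK') #=> []  (Array of XMLElement)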

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

35
src/vmm_mad/remotes/lxc/cancel Executable file
View File

@ -0,0 +1,35 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'container'
require_relative '../../scripts_common'
xml = STDIN.read
client = LXCClient.new
container = Container.new_from_xml(xml, client)
# Stop vnc
container.vnc('stop')
exit(-1) unless container.cancel
exit 0

54
src/vmm_mad/remotes/lxc/deploy Executable file
View File

@ -0,0 +1,54 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'container'
vm_id = ARGV[2]
xml = STDIN.read
client = LXCClient.new
container = Container.new_from_xml(xml, client)
# Clean before starting the container
container.clean(true)
# Create the LXC container (lxc-create)
rc = container.create
if !rc
STDERR.puts 'There was an error creating the container. Check oned.log.'
exit(-1)
end
# Start the LXC container (lxc-start). It will wait for the container to be
# RUNNING or fails if timeout is reached.
rc = container.start
if !rc
STDERR.puts 'There was an error starting the container. Check oned.log.'
exit(-1)
end
# Start VNC (only started if necessary)
container.vnc('start')
# Set deploy_id
puts "one-#{vm_id}"

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1,50 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
##############################################################################
# VNC Options
##############################################################################
#
# Options to customize the VNC access to the container:
# - :command: to be executed in the VNC terminal.
# - :width: of the terminal
# - :height: of the terminal
# - :timeout: seconds to close the terminal if no input has been received
:vnc:
:command: 'sudo lxc-console'
:width: 800
:height: 600
:timeout: 300
##############################################################################
# Mapper Options
##############################################################################
#
# Default filesystem format for disks formatted by the LXC driver. The value
# should be the same as the one used in mkfs -t $filesystem $device
:filesystem: ext4
################################################################################
# OpenNebula Configuration Options
################################################################################
#
# Default path for the datastores. This only needs to be changed if the
# corresponding value in oned.conf has been modified.
:datastore_location: /var/lib/one/datastores
#
# Path to the LXC default configuration file. This file will be included in
# the configuration of every LXC container (lxc.include)
:default_lxc_config: /usr/share/lxc/config/common.conf

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

38
src/vmm_mad/remotes/lxc/reboot Executable file
View File

@ -0,0 +1,38 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'container'
require_relative '../../scripts_common'
xml = STDIN.read # VMM_DRIVER_ACTION_DATA
client = LXCClient.new
container = Container.new_from_xml(xml, client)
# Stop VNC before reboot
container.vnc('stop')
exit(-1) unless container.reboot
# Restart VNC
container.vnc('start')
exit 0

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

19
src/vmm_mad/remotes/lxc/reset Executable file
View File

@ -0,0 +1,19 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$(dirname $0)/reboot "$@" <<< $(cat -)

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1,35 @@
#!/usr/bin/ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift File.dirname(__FILE__)
require 'container'
require_relative '../../scripts_common'
xml = STDIN.read
client = LXCClient.new
container = Container.new_from_xml(xml, client)
# Stop vnc
container.vnc('stop')
exit(-1) unless container.shutdown
exit 0

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -0,0 +1 @@
/home/christian/ws/one-ee/src/vmm_mad/remotes/common/not_supported.sh

View File

@ -166,8 +166,8 @@ module VNMMAD
end
# A NIC using Firecracker. This class implements functions to get the
# physical interface that the NIC is using, based on the MAC address
# A NIC using Firecracker. This class implements functions to get (by
# its name) the host network interface that the NIC is using.
class NicFirecracker < Hash
VNMNetwork::HYPERVISORS['firecracker'] = self
@ -180,7 +180,27 @@ module VNMMAD
nil
end
# Look for the tap in config
def get_tap(vm)
self[:tap] = "#{vm.deploy_id}-#{self[:nic_id]}"
self
end
end
# A NIC using LXC. This class implements functions to get (by its name)
# the host network interface that the NIC is using.
class NicLXC < Hash
VNMNetwork::HYPERVISORS['lxc'] = self
def initialize
super(nil)
end
def get_info(_vm)
nil
end
def get_tap(vm)
self[:tap] = "#{vm.deploy_id}-#{self[:nic_id]}"