1
0
mirror of https://github.com/OpenNebula/one.git synced 2024-12-22 13:33:52 +03:00

F #4089, #2859: Fix FC and Monitoring

* Adjust xpaths for Sunstone
    * New MONITORING records format
    * Fix OCA API
    * Improvements on dockerhub importer
    * Improvements on firecracker context mapper
    * Fix shutdown on firecracker
    * Add logging to firecracker
This commit is contained in:
Ruben S. Montero 2020-03-23 10:45:21 +01:00
parent 45899f763d
commit 43daf0a587
No known key found for this signature in database
GPG Key ID: A0CEA6FA880A1D87
55 changed files with 1247 additions and 302 deletions

View File

@ -21,6 +21,7 @@
#include <set>
#include <vector>
#include <string>
#include <functional>
#include <libxml/tree.h>
#include <libxml/parser.h>
@ -474,6 +475,17 @@ public:
return attributes.empty();
}
/**
 *  Generic iterator over Template attributes
 *    @param f callable invoked once with each attribute of the template
 */
void each_attribute(std::function<void(const Attribute * a)>&& f) const
{
    for (auto attr = attributes.begin(); attr != attributes.end(); ++attr)
    {
        f(attr->second);
    }
}
protected:
/**
* The template attributes

View File

@ -22,7 +22,28 @@
#include <string>
/**
* Virtual Machine Monitor class, stores the monitor data for the VM
* Virtual Machine Monitor class, stores the monitor data for the VM in
* template format.
*
* The template is free format but the following keys are used:
* - ID of the VM (mandatory)
* - TIMESTAMP of the monitoring record (mandatory)
* - CPU, MEMORY
*
* Example:
*
* <MONITORING>
* <TIMESTAMP>1584698508</TIMESTAMP>
* <ID>0</ID>
* <CPU><![CDATA[5.02]]></CPU>
* <DISKRDBYTES><![CDATA[346366848]]></DISKRDBYTES>
* <DISKRDIOPS><![CDATA[9935]]></DISKRDIOPS>
* <DISKWRBYTES><![CDATA[1058840064]]></DISKWRBYTES>
* <DISKWRIOPS><![CDATA[107167]]></DISKWRIOPS>
* <MEMORY><![CDATA[1098912]]></MEMORY>
* <NETRX><![CDATA[567412942]]></NETRX>
* <NETTX><![CDATA[3592223]]></NETTX>
* </MONITORING>
*/
class VirtualMachineMonitorInfo
{
@ -39,6 +60,39 @@ public:
{
}
/**
* @return a xml string representation of the monitoring record
*/
std::string to_xml() const;
/**
* @return a xml string including only STATE, CPU and MEMORY attributes
*/
std::string to_xml_short() const;
/**
* Loads an existing monitoring record from xml_string.
* @param xml_string representation
* @return 0 on success, -1 otherwise
*/
int from_xml(const std::string& xml_string);
/**
* The contents of the provided template are merged with any previous
* existing data, preserving it.
* @param tmpl with monitoring attributes
* @return 0 on success, -1 otherwise
*/
int from_template(const Template &tmpl);
/**
* Reset monitoring data to zero
*/
void reset_info();
// -------------------------------------------------------------------------
// Class set/getters
// -------------------------------------------------------------------------
int oid() const { return _oid; }
void oid(int oid) { _oid = oid; }
@ -47,26 +101,6 @@ public:
void timestamp(time_t timestamp) { _timestamp = timestamp; }
std::string to_xml() const;
std::string to_xml_extended() const;
std::string to_xml_short() const;
/**
* Fills monitoring data from xml_string
* If some data are not contained, keep old data
* @return 0 on succes, -1 otherwise
*/
int from_xml(const std::string& xml_string);
int from_template(const Template &tmpl);
/**
* Reset monitoring data to zero
*/
void reset_info();
private:
int _oid;
time_t _timestamp;

View File

@ -293,6 +293,12 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/im/lxd-probes.d/host/system \
$VAR_LOCATION/remotes/im/lxd-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/lxd-probes.d/vm/status \
$VAR_LOCATION/remotes/im/firecracker.d \
$VAR_LOCATION/remotes/im/firecracker-probes.d/host/beacon \
$VAR_LOCATION/remotes/im/firecracker-probes.d/host/monitor \
$VAR_LOCATION/remotes/im/firecracker-probes.d/host/system \
$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status \
$VAR_LOCATION/remotes/im/vcenter.d \
$VAR_LOCATION/remotes/im/ec2.d \
$VAR_LOCATION/remotes/im/az.d \
@ -473,6 +479,7 @@ INSTALL_FILES=(
IM_PROBES_FILES:$VAR_LOCATION/remotes/im
IM_PROBES_LIB_FILES:$VAR_LOCATION/remotes/im/lib
IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
IM_PROBES_FIRECRACKER_FILES:$VAR_LOCATION/remotes/im/firecracker.d
IM_PROBES_DUMMY_FILES:$VAR_LOCATION/remotes/im/dummy.d
IM_PROBES_LXD_FILES:$VAR_LOCATION/remotes/im/lxd.d
IM_PROBES_KVM_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/host/beacon
@ -499,6 +506,12 @@ INSTALL_FILES=(
IM_PROBES_ONE_FILES:$VAR_LOCATION/remotes/im/one.d
IM_PROBES_PACKET_FILES:$VAR_LOCATION/remotes/im/packet.d
IM_PROBES_VERSION:$VAR_LOCATION/remotes
IM_PROBES_FIRECRACKER_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/host/beacon
IM_PROBES_FIRECRACKER_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/host/monitor
IM_PROBES_FIRECRACKER_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/host/system
IM_PROBES_FIRECRACKER_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor
IM_PROBES_FIRECRACKER_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/firecracker-probes.d
AUTH_SSH_FILES:$VAR_LOCATION/remotes/auth/ssh
AUTH_X509_FILES:$VAR_LOCATION/remotes/auth/x509
AUTH_LDAP_FILES:$VAR_LOCATION/remotes/auth/ldap
@ -952,8 +965,8 @@ VMM_EXEC_FIRECRACKER_SCRIPTS="src/vmm_mad/remotes/firecracker/deploy \
VMM_EXEC_FIRECRACKER_LIB="src/vmm_mad/remotes/lib/firecracker/opennebula_vm.rb \
src/vmm_mad/remotes/lib/firecracker/client.rb \
src/vmm_mad/remotes/lib/firecracker/microvm.rb \
src/vmm_mad/remotes/lib/firecracker/map_context.sh \
src/vmm_mad/remotes/lib/firecracker/clean.sh \
src/vmm_mad/remotes/lib/firecracker/map_context \
src/vmm_mad/remotes/lib/firecracker/clean_fc \
src/vmm_mad/remotes/lib/firecracker/command.rb"
#-------------------------------------------------------------------------------
# VMM configuration LXD scripts, to be installed under $REMOTES_LOCATION/etc/vmm/lxd
@ -1123,6 +1136,7 @@ IM_PROBES_LIB_FILES="\
src/im_mad/remotes/lib/kvm.rb \
src/im_mad/remotes/lib/lxd.rb \
src/im_mad/remotes/lib/linux.rb \
src/im_mad/remotes/lib/firecracker.rb\
src/im_mad/remotes/lib/numa_common.rb \
src/im_mad/remotes/lib/probe_db.rb"
@ -1219,6 +1233,37 @@ IM_PROBES_ETC_LXD_PROBES_FILES="\
src/im_mad/remotes/lxd-probes.d/pci.conf \
src/im_mad/remotes/lib/probe_db.conf"
# Firecracker PROBES
IM_PROBES_FIRECRACKER_FILES="\
src/im_mad/remotes/firecracker.d/monitord-client_control.sh \
src/im_mad/remotes/firecracker.d/monitord-client.rb"
IM_PROBES_FIRECRACKER_HOST_BEACON_FILES="\
src/im_mad/remotes/firecracker-probes.d/host/beacon/monitord-client-shepherd.sh \
src/im_mad/remotes/firecracker-probes.d/host/beacon/date.sh"
IM_PROBES_FIRECRACKER_HOST_MONITOR_FILES="\
src/im_mad/remotes/firecracker-probes.d/host/monitor/linux_usage.rb \
src/im_mad/remotes/firecracker-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_FIRECRACKER_HOST_SYSTEM_FILES="\
src/im_mad/remotes/firecracker-probes.d/host/system/architecture.sh \
src/im_mad/remotes/firecracker-probes.d/host/system/cpu.sh \
src/im_mad/remotes/firecracker-probes.d/host/system/linux_host.rb \
src/im_mad/remotes/firecracker-probes.d/host/system/monitor_ds.rb \
src/im_mad/remotes/firecracker-probes.d/host/system/name.sh \
src/im_mad/remotes/firecracker-probes.d/host/system/numa_host.rb \
src/im_mad/remotes/firecracker-probes.d/host/system/version.sh"
IM_PROBES_FIRECRACKER_VM_MONITOR_FILES="\
src/im_mad/remotes/firecracker-probes.d/vms/monitor/poll.rb \
src/im_mad/remotes/firecracker-probes.d/vms/monitor/monitor_ds_vm.rb"
IM_PROBES_FIRECRACKER_VM_STATUS_FILES="\
src/im_mad/remotes/firecracker-probes.d/vms/status/state.rb"
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES="src/im_mad/remotes/lib/probe_db.conf"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/poll"
IM_PROBES_EC2_FILES="src/im_mad/remotes/ec2.d/poll"
@ -1558,6 +1603,7 @@ TM_ISCSI_FILES="src/tm_mad/iscsi_libvirt/clone \
DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
src/datastore_mad/remotes/downloader.sh \
src/datastore_mad/remotes/lxd_downloader.sh \
src/datastore_mad/remotes/docker_downloader.sh \
src/datastore_mad/remotes/vcenter_uploader.rb \
src/datastore_mad/remotes/vcenter_downloader.rb \
src/datastore_mad/remotes/url.rb \

View File

@ -24,12 +24,12 @@
:desc: Actual status
:size: 4
:UCPU:
:desc: CPU percentage used by the VM
:CPU:
:desc: CPU assigned to the VM
:size: 4
:UMEM:
:desc: Memory used by the VM
:MEM:
:desc: Memory assigned to the VM
:size: 7
:HOST:
@ -58,8 +58,8 @@
- :GROUP
- :NAME
- :STAT
- :UCPU
- :UMEM
- :CPU
- :MEM
- :HOST
- :TIME
@ -69,7 +69,7 @@
- :NAME
- :IP
- :STAT
- :UCPU
- :UMEM
- :CPU
- :MEM
- :HOST
- :TIME

View File

@ -82,7 +82,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
:description => 'Path of the image file',
:format => String,
:proc => lambda do |o, _options|
next [0, o] if o.match(%r{^https?://})
next [0, o] if o.match(%r{^(https?|docker)://})
if o[0, 1]=='/'
path=o

View File

@ -345,15 +345,15 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
OneVMHelper.state_to_str(d['STATE'], d['LCM_STATE'])
end
column :UCPU, 'CPU percentage used by the VM', :size => 4 do |d|
cpu = d['MONITORING']['CPU']
column :CPU, 'CPU asigned to the VM', :size => 4 do |d|
cpu = d['TEMPLATE']['CPU']
cpu = '0' if cpu.nil?
cpu
end
column :UMEM, 'Memory used by the VM', :size => 7 do |d|
OpenNebulaHelper.unit_to_str(d['MONITORING']['MEMORY'].to_i,
column :MEM, 'Memory asigned to the VM', :size => 7 do |d|
OpenNebulaHelper.unit_to_str(d['TEMPLATE']['MEMORY'].to_i,
options)
end
@ -395,7 +395,7 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
OneVMHelper.ip_str(d)
end
default :ID, :USER, :GROUP, :NAME, :STAT, :UCPU, :UMEM, :HOST,
default :ID, :USER, :GROUP, :NAME, :STAT, :CPU, :MEM, :HOST,
:TIME
end

View File

@ -0,0 +1,300 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Resolve installation layout: packaged install vs self-contained ONE_LOCATION
if [ -z "${ONE_LOCATION}" ]; then
LIB_LOCATION=/usr/lib/one
VAR_LOCATION=/var/lib/one
else
LIB_LOCATION=$ONE_LOCATION/lib
VAR_LOCATION=$ONE_LOCATION/var
fi

. $LIB_LOCATION/sh/scripts_common.sh

#-------------------------------------------------------------------------------
# Downloader configuration attributes
#-------------------------------------------------------------------------------
DRIVER_PATH=$(dirname $0)
# Full docker:// URL passed by the downloader (first positional argument)
MARKET_URL=$1

# URL with the context releases
CONTEXT_API="https://api.github.com/repos/OpenNebula/addon-context-linux/releases"
CONTEXT_URL="https://github.com/OpenNebula/addon-context-linux/releases/download"

# Per-distro package lists
# NOTE(review): these PKG_* variables are not referenced anywhere below; the
# Dockerfile heredocs hardcode their package sets — confirm before removing
PKG_APK="curl openssh"
PKG_DEB="curl "
PKG_RPM="openssh-server"
PKG_CENTOS6="epel-release $PKG_RPM"
PKG_FEDORA="network-scripts $PKG_RPM"

#Default DNS server to download the packages
DNS_SERVER="1.1.1.1"

#Directory used to download packages
TMP_DIR=/var/tmp

#Curl command and options used to download container image
CURL="curl -L"
#-------------------------------------------------------------------------------
# This function returns (on stdout) the newest context packages release tag
# that is not newer than the installed OpenNebula version. Requires network
# access to $CONTEXT_API and a local oned binary.
#-------------------------------------------------------------------------------
function get_tag_name {
# Installed OpenNebula version with the dots removed (e.g. 5.10.1 -> 5101)
local version=`oned -v | grep "is distributed" | awk '{print $2}' | tr -d .`

# Normalize 3-digit x.y versions (e.g. 510) to 4 digits (5100) so they
# compare correctly against x.y.z release numbers
if [ `echo $version | wc -c` -eq 4 ]; then
version=$(($version * 10))
fi

# All published context release tags (presumably newest first — the loop
# below picks the first one that is old enough; TODO confirm API ordering)
local creleases=`curl -sSL $CONTEXT_API | grep "\"tag_name\":" | \
awk '{print $2}' | cut -d 'v' -f 2 | cut -d '"' -f 1`

for tag in `echo $creleases`; do
# Same dot-stripping/normalization as for the installed version
local cversion=`echo $tag | tr -d .`

if [ `echo $cversion | wc -c` -eq 4 ]; then
cversion=$(($cversion * 10))
fi

# First release not newer than the installed version is the match
if [ $cversion -le $version ]; then
echo "$tag"
break
fi
done
}
#-------------------------------------------------------------------------------
# This function takes care of removing all temporary directories in case
# something fails (installed as the ERR trap handler). Uses the globals
# $container_id, $sid and $dockerdir.
#-------------------------------------------------------------------------------
function clean_err {
# Container/image may not exist yet when the trap fires; ignore failures
docker rm -f $container_id > /dev/null 2>&1 || true
docker image rm -f one$sid > /dev/null 2>&1

# Unmount mnt directory (if necessary)
if grep -qs "$dockerdir/mnt" /proc/mounts; then
sudo umount "$dockerdir/mnt"
fi

rm -rf $dockerdir
}
#-------------------------------------------------------------------------------
# Checks that the container FS fits in the requested image size (global $size,
# in MB, parsed from the URL arguments); if it does not, $size is grown to the
# container FS size + 200MB. Reads the global $tarball produced by
# 'docker export'.
#-------------------------------------------------------------------------------
function get_size {
    # Container FS tarball size in whole MB
    tar_size=$(stat -c%s "$tarball" | awk '{ byte =$1 /1024/1024; print byte}' | cut -d '.' -f 1)

    # Default to 0 when no size= argument was supplied in the URL; otherwise
    # the numeric test below fails and aborts the whole script (set -e)
    size=${size:-0}

    if [ "$tar_size" -ge "$size" ]; then
        size=$(($tar_size + 200))
    fi
}
set -e -o pipefail

#-------------------------------------------------------------------------------
# Parse downloader URL
#-------------------------------------------------------------------------------
# URL is in the form
# docker://<container_name>[:tags]?size=&filesystem=&format=&distro=
#
# container_name:tags : As listed in docker hub. e.g. alpine:3.9
# size: in MB for the resulting image
# filesystem: filesystem type e.g. ext4
# format: image format e.g. raw or qcow2
# distro: base image distro to install contents
#-------------------------------------------------------------------------------
id=`uuidgen`
sid=`echo $id | cut -d '-' -f 1`
url=`echo $MARKET_URL | grep -oP "^"docker://"\K.*"`
docker_hub=`echo $url | cut -d '?' -f 1`
arguments=`echo $url | cut -d '?' -f 2`
selected_tag=`get_tag_name`

#Create a shell variable for every argument (size=5219, format=raw...)
# NOTE(review): eval of URL-supplied key=value pairs executes content that
# may come from untrusted marketplace input — keys/values should be validated
for p in ${arguments//&/ }; do
kvp=( ${p/=/ } );
k=${kvp[0]};v=${kvp[1]};
[ -n "$k" -a -n "$v" ] && eval $k=$v;
done

docker_image=`echo $docker_hub | cut -f1 -d':'``echo $id |cut -f1 -d'-'`

# Working layout under $TMP_DIR/$id: dockerfile, exported FS tarball and
# the raw/qcow2 images built from it
dockerdir=$TMP_DIR/$id
dockerfile=$dockerdir/dockerfile
tarball=$dockerdir/fs.tar
img_raw=$dockerdir/img.raw
img_qcow=$dockerdir/img.qcow

# Trap for cleaning temporary directories
trap clean_err ERR

mkdir -p $dockerdir
mkdir -p $dockerdir/mnt
#-------------------------------------------------------------------------------
# Create a DockerFile
#-------------------------------------------------------------------------------
# Select the Dockerfile RUN commands and the context package to install
# depending on the distro= URL argument. $commands is expanded inside the
# Dockerfile heredoc created below.
case "$distro" in
debian|ubuntu)
contextpkg=$dockerdir/context.deb
contexturl=$CONTEXT_URL/v$selected_tag/one-context_$selected_tag-1.deb
commands=$(cat <<EOC
COPY context.deb /root/context.deb
RUN apt-get update
RUN apt-get update && apt-get install -y \
curl \
dbus \
kmod \
iproute2 \
iputils-ping \
net-tools \
openssh-server \
sudo \
systemd \
udev \
vim-tiny \
wget
RUN apt-get install -y /root/context.deb
RUN rm /root/context.deb
RUN apt-get clean && \
rm -rf /var/lib/apt/lists/*
EOC
)
;;
redhat)
# NOTE(review): redhat support looks unfinished — no packages are installed
# and no context package URL is set
terminal="/bin/bash"
commands=$(cat <<EOC
EOC
)
;;
alpine)
terminal="/bin/ash"
contextpkg=$dockerdir/context.apk
contexturl=$CONTEXT_URL/v$selected_tag/one-context-$selected_tag-r1.apk
commands=$(cat <<EOC
COPY context.apk /root/context.apk
RUN apk add coreutils \
openrc \
udev \
openssh
RUN rc-update add sysfs boot && \
rc-update add devfs boot && \
rc-update add procfs boot && \
rc-update add hostname boot
RUN echo "ttyS0::respawn:/sbin/getty -L ttyS0 115200 vt100" >> /etc/inittab
RUN apk add --allow-untrusted /root/context.apk
RUN rm /root/context.apk
RUN rc-update del one-context boot && \
rc-update add one-context default
RUN rc-update add sshd default && \
rc-update add udev default && \
rc-update add networking default
RUN echo 'rc_sys=""' >> /etc/rc.conf
RUN sed -e '159a dev_context=/dev/vdb' \
-e '169s/.*/\t\tmount -o ro \/dev\/vdb \${MOUNT_DIR} 2\>\/dev\/null/' \
-i /usr/sbin/one-contextd
EOC
)
;;
*)
;;
esac
# Final Dockerfile: base image + distro specific commands + common tweaks
cat > $dockerfile <<EOC
FROM $docker_hub
$commands
RUN echo "#Generated by OpenNebula" > /etc/resolv.conf
RUN rm -f /etc/ssh/ssh_host_* > /dev/null 2>&1
RUN usermod -p '*' root > /dev/null 2>&1
EOC

# Download the context package referenced from the Dockerfile
$CURL $contexturl -Lsfo $contextpkg > /dev/null 2>&1

docker build -t one$sid -f $dockerfile $dockerdir > /dev/null 2>&1

image_id=`docker images -q one$sid`

#-------------------------------------------------------------------------------
# Flatten container image
#-------------------------------------------------------------------------------
# Run /bin/true just to materialize a container whose FS can be exported
container_id=$(docker run -d $image_id /bin/true)

docker export -o $tarball $container_id > /dev/null 2>&1

docker rm $container_id > /dev/null 2>&1
docker image rm one$sid > /dev/null 2>&1

#-------------------------------------------------------------------------------
# Dump container FS and create image
#-------------------------------------------------------------------------------

# Ensure $size have a good value
get_size

qemu-img create -f raw $img_raw ${size}M > /dev/null 2>&1

# Format the image with the requested filesystem (ext4 when unset/unknown)
case $filesystem in
"ext4")
mkfs.ext4 -F $img_raw > /dev/null 2>&1
;;
"xfs")
mkfs.xfs -f $img_raw > /dev/null 2>&1
;;
*)
mkfs.ext4 -F $img_raw > /dev/null 2>&1
;;
esac

#-------------------------------------------------------------------------------
# Mount container disk image and untar rootfs contents to it
#-------------------------------------------------------------------------------
sudo mount $img_raw $dockerdir/mnt > /dev/null 2>&1
sudo chmod o+w $dockerdir/mnt

sudo tar xpf $tarball -C $dockerdir/mnt > /dev/null 2>&1

sync

sudo umount $dockerdir/mnt

# The resulting image is written to stdout, to be consumed by the downloader
if [ "$format" == "qcow2" ]; then
qemu-img convert -f raw -O qcow2 $img_raw $img_qcow > /dev/null 2>&1
cat $img_qcow
else
cat $img_raw
fi

#-------------------------------------------------------------------------------
# Clean up files & dirs
#-------------------------------------------------------------------------------
rm -rf $dockerdir

exit 0

View File

@ -309,7 +309,6 @@ ssh://*)
command="ssh ${ssh_arg[0]} $rmt_cmd"
;;
s3://*)
# Read s3 environment
s3_env
@ -332,6 +331,10 @@ lxd://*)
file_type="application/octet-stream"
command="$VAR_LOCATION/remotes/datastore/lxd_downloader.sh \"$FROM\""
;;
docker://*)
file_type="application/octet-stream"
command="$VAR_LOCATION/remotes/datastore/docker_downloader.sh \"$FROM\""
;;
*)
if [ ! -r $FROM ]; then
echo "Cannot read from $FROM" >&2

View File

@ -251,6 +251,19 @@ function fs_size {
if [ -d "${SRC}" ]; then
SIZE=`set -o pipefail; du -sb "${SRC}" | cut -f1`
error=$?
elif (echo "${SRC}" | grep -qe '^docker\?://'); then
url=`echo ${SRC} | grep -oP "^"docker://"\K.*"`
arguments=`echo $url | cut -d '?' -f 2`
for p in ${arguments//&/ }; do
kvp=( ${p/=/ } );
if [ ${kvp[0]} == 'size' ]; then
SIZE=$((${kvp[1]} * 1024));
error=0
break
fi
done
elif [ -f "${SRC}" ] || (echo "${SRC}" | grep -qe '^https\?://'); then
IMAGE=$(mktemp)

View File

@ -44,6 +44,8 @@ CLIENT_PID_FILE=/tmp/one-monitord-client.pid
# Launch the client
function start_client() {
rm $CLIENT_PID_FILE >/dev/null 2>&1
echo "$STDIN" | /usr/bin/env ruby $CLIENT $ARGV &
echo $! > $CLIENT_PID_FILE

View File

@ -47,6 +47,8 @@ CLIENT_PID_FILE=/tmp/one-monitord-$HID.pid
# Launch the client
function start_client() {
rm $CLIENT_PID_FILE >/dev/null 2>&1
echo "$STDIN" | base64 -d - | /usr/bin/env ruby $CLIENT $ARGV &
echo $! > $CLIENT_PID_FILE

View File

@ -0,0 +1 @@
../../../common.d/date.sh

View File

@ -0,0 +1,29 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

# Beacon probe: makes sure only the registered firecracker monitord client
# keeps running on this host, killing any stray duplicates.
(
# Nothing to do when no client has been started on this host
[ -f /tmp/one-monitord-client.pid ] || exit 0

running_pid=$(cat /tmp/one-monitord-client.pid)

# PIDs of firecracker monitord-client.rb processes other than the
# registered one
pids=$(ps axuwww | grep -e "/monitord-client.rb firecracker" | grep -v grep | awk '{ print $2 }' | grep -v "^${running_pid}$")

if [ -n "$pids" ]; then
# SIGABRT the strays so a single client remains
kill -6 $pids
fi
) > /dev/null

View File

@ -0,0 +1,21 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

require_relative '../../../lib/linux'

# Host monitor probe: report host usage information (as implemented by
# LinuxHost.usage in lib/linux.rb) tagged for the firecracker driver
LinuxHost.usage('firecracker')

View File

@ -0,0 +1 @@
../../../node-probes.d/numa_usage.rb

View File

@ -0,0 +1 @@
../../../node-probes.d/architecture.sh

View File

@ -0,0 +1 @@
../../../node-probes.d/cpu.sh

View File

@ -0,0 +1,21 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

require_relative '../../../lib/linux'

# Host system probe: report static host configuration (as implemented by
# LinuxHost.config in lib/linux.rb) tagged for the firecracker driver
LinuxHost.config('firecracker')

View File

@ -0,0 +1 @@
../../../node-probes.d/monitor_ds.rb

View File

@ -0,0 +1 @@
../../../node-probes.d/name.sh

View File

@ -0,0 +1 @@
../../../node-probes.d/numa_host.rb

View File

@ -0,0 +1 @@
../../../common.d/version.sh

View File

@ -0,0 +1 @@
../../../node-probes.d/monitor_ds_vm.rb

View File

@ -0,0 +1,5 @@
#!/usr/bin/ruby

# VM monitor probe for the firecracker driver: prints the monitor records
# of every Firecracker microVM on this host (see DomainList.info in
# lib/firecracker.rb)

require_relative '../../../lib/firecracker'

puts DomainList.info

View File

@ -0,0 +1,24 @@
#!/usr/bin/ruby

# VM status probe for the firecracker driver: prints the state of the
# microVMs on this host, using the probe DB (VirtualMachineDB) to track them.

require_relative '../../../lib/probe_db'
require_relative '../../../lib/firecracker'

# Probe configuration (XML document) is received on stdin
xml_txt = STDIN.read

begin
config = REXML::Document.new(xml_txt).root

sync = config.elements['PROBES_PERIOD/SYNC_STATE_VM'].text.to_i
rescue StandardError
# Fall back to a 180s sync period when the configuration cannot be parsed
sync = 180
end

begin
# VMs missing from the driver listing are reported as POWEROFF
vmdb = VirtualMachineDB.new('firecracker',
:missing_state => 'POWEROFF',
:sync => sync)

vmdb.purge

puts vmdb.to_status
rescue StandardError => e
puts e
end

View File

@ -0,0 +1 @@
../common.d/monitord-client.rb

View File

@ -0,0 +1 @@
../common.d/monitord-client_control.sh

View File

@ -0,0 +1,479 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
$LOAD_PATH.unshift "#{File.dirname(__FILE__)}/../../vmm/firecracker/"
require 'json'
require 'base64'
require 'client'
#-------------------------------------------------------------------------------
# Firecracker Monitor Module. This module provides basic functionality to
# retrieve Firecracker instances information
#-------------------------------------------------------------------------------
module Firecracker

    ###########################################################################
    # MicroVM metrics/info related methods
    ###########################################################################

    # Asks the microVM API to flush its metrics to the metrics FIFO
    #   @param uuid [String] deployment id (e.g. "one-352")
    #   @return [Boolean] true on success, false on any API/socket error
    def self.flush_metrics(uuid)
        begin
            socket = "/srv/jailer/firecracker/#{uuid}/root/api.socket"
            client = FirecrackerClient.new(socket)

            data = '{"action_type": "FlushMetrics"}'
            client.put('actions', data)
        rescue StandardError, FirecrackerError
            return false
        end

        true
    end

    # Reads the latest metrics record of a microVM
    #   @param uuid [String] deployment id
    #   @return [Hash] parsed metrics JSON
    def self.metrics(uuid)
        metrics_path = "/srv/jailer/firecracker/#{uuid}/root/metrics.fifo"

        # clear previous metrics records
        File.open(metrics_path, 'w') {|file| file.truncate(0) }

        # Flush metrics
        flush_metrics(uuid)

        # Read the last metrics record, dropping NUL padding
        metrics_f = File.read(metrics_path).split("\n")[-1]
        metrics_f.tr!("\u0000", '')

        JSON.parse(metrics_f)
    end

    # Queries the microVM machine configuration
    #   @return [Hash, nil] nil on any API/socket error
    def self.machine_config(uuid)
        begin
            socket = "/srv/jailer/firecracker/#{uuid}/root/api.socket"
            client = FirecrackerClient.new(socket)

            response = client.get('machine-config')
        rescue StandardError, FirecrackerError
            return
        end

        ###################################################################
        # Machine config will return a JSON with the following information
        # {
        #     "vcpu_count": <int>,
        #     "mem_size_mib": <int>,
        #     "ht_enabled": <bool>, # Todo, support it
        #     "cpu_template": <string> # Todo, support it
        # }
        ###################################################################

        response
    end

    # Queries general microVM information
    #   @return [Hash, nil] nil on any API/socket error
    def self.general_info(uuid)
        begin
            socket = "/srv/jailer/firecracker/#{uuid}/root/api.socket"
            client = FirecrackerClient.new(socket)

            response = client.get('')
        rescue StandardError, FirecrackerError
            return
        end

        ###################################################################
        # General info will return a JSON with the following information
        # {
        #     "id": <string> # (e.g "one-352")
        #     "state": <string>, # Check Domain::STATE_MAP
        #     "vmm_version": <string> # (e.g "0.20.0")
        # }
        ###################################################################

        response
    end

    # Aggregates machine config and general info of a microVM
    #   @param uuid [String] deployment id
    #   @return [Hash, nil] combined hash, or nil when any query failed
    def self.retrieve_info(uuid)
        config = machine_config(uuid)
        info   = general_info(uuid)

        # machine_config/general_info return nil on error; the previous
        # unconditional merge! raised TypeError on nil. Propagate nil instead
        # so callers (e.g. Domain#info) can detect the failure.
        return if config.nil? || info.nil?

        config.merge(info)
    end

end
#-------------------------------------------------------------------------------
# This module gets the pid, memory and cpu usage of a set of process that
# includes a one-<id> argument (firecracker microVMs).
#
# Usage is computed based on the fraction of jiffies used by the process
# relative to the system during AVERAGE_SECS (1s)
#-------------------------------------------------------------------------------
module ProcessList

# Number of seconds to average process usage
AVERAGE_SECS = 1

# list of process indexed by uuid, each entry:
# :pid
# :memory
# :cpu
def self.process_list
pids = []
procs = {}
ps = `ps auxwww`

ps.each_line do |l|
# Only firecracker processes running a one-<id> microVM
m = l.match(/firecracker.+(one-\d+)/)
next unless m

l = l.split(/\s+/)

# Add swapped-out memory to the resident size reported by ps
swap = `cat /proc/#{l[1]}/status 2>/dev/null | grep VmSwap`
swap = swap.split[1] || 0

procs[m[1]] = {
:pid => l[1],
:memory => l[5].to_i + swap.to_i
}

pids << l[1]
end

cpu = cpu_info(pids)

procs.each {|_i, p| p[:cpu] = cpu[p[:pid]] || 0 }

procs
end

# Returns the one-<id> names of the firecracker processes on this host
def self.retrieve_names
ps = `ps auxwww`
domains = []

ps.each_line do |l|
m = l.match(/firecracker.+(one-\d+)/)
next unless m

domains << m[1]
end

domains
end

# Get cpu usage in 100% for a set of PIDs
# param[Array] pids of the processes to compute the CPU usage
# result[Hash] cpu usage indexed by pid
def self.cpu_info(pids)
# 100% per core, e.g. 400 for a 4 core host
multiplier = Integer(`grep -c processor /proc/cpuinfo`) * 100

cpu_ini = {}

j_ini = jiffies

pids.each do |pid|
cpu_ini[pid] = proc_jiffies(pid).to_f
end

sleep AVERAGE_SECS

cpu_j = jiffies - j_ini

cpu = {}

pids.each do |pid|
cpu[pid] = (proc_jiffies(pid) - cpu_ini[pid]) / cpu_j
cpu[pid] = (cpu[pid] * multiplier).round(2)
end

cpu
end

# CPU tics used in the system
def self.jiffies
stat = File.open('/proc/stat', 'r') {|f| f.readline }

j = 0

# Sum the cpu time columns of the aggregate line (all but the last two)
stat.split(' ')[1..-3].each {|num| j += num.to_i }

j
rescue StandardError
0
end

# CPU tics used by a process
def self.proc_jiffies(pid)
stat = File.read("/proc/#{pid}/stat")

j = 0

data = stat.lines.first.split(' ')

# utime, stime, cutime, cstime fields of /proc/<pid>/stat
[13, 14, 15, 16].each {|col| j += data[col].to_i }

j
rescue StandardError
0
end

end
#-------------------------------------------------------------------------------
# This class represents a Firecracker domain, information includes:
# @vm[:name]
# @vm[:id] from one-<id>
# @vm[:uuid] (deployment id)
# @vm[:fc_state] Firecracker state
# @vm[:state] OpenNebula state
# @vm[:netrx]
# @vm[:nettx]
# @vm[:diskrdbytes]
# @vm[:diskwrbytes]
# @vm[:diskrdiops]
# @vm[:diskwriops]
#
# This class uses the Firecracker API and the ProcessList interface
#-------------------------------------------------------------------------------
class Domain

attr_reader :vm, :name

def initialize(name)
@name = name
@vm = {}
end

# Gets the information of the domain, fills the @vm hash querying the
# Firecracker API (machine config + general instance info)
#   @return -1 when the microVM information cannot be retrieved
def info
# Query the microVM general info and machine config
hash = Firecracker.retrieve_info(@name)

return -1 if hash.nil?

@vm[:name] = @name
@vm[:uuid] = hash['id']

# Numeric OpenNebula VM id out of the one-<id> domain name
m = @vm[:name].match(/^one-(\d*)$/)

if m
@vm[:id] = m[1]
else
@vm[:id] = -1
end

@vm[:fc_state] = hash['state']

state = STATE_MAP[hash['state']] || 'UNKNOWN'

@vm[:state] = state

io_stats
end

# Get domain attribute by name.
def [](name)
@vm[name]
end

def []=(name, value)
@vm[name] = value
end

# Merge hash value into the domain attributes
def merge!(map)
@vm.merge!(map)
end

# Builds an OpenNebula Template with the monitoring keys. E.g.
#   CPU=125.2
#   MEMORY=1024
#   NETTX=224324
#   NETRX=213132
#   ...
#
# Keys are defined in MONITOR_KEYS constant
#
# @return [String] OpenNebula template encoded in base64
def to_monitor
mon_s = ''

MONITOR_KEYS.each do |k|
next unless @vm[k.to_sym]

mon_s << "#{k.upcase}=\"#{@vm[k.to_sym]}\"\n"
end

Base64.strict_encode64(mon_s)
end

private

# --------------------------------------------------------------------------
# Firecracker states for the guest are
# * 'Uninitialized'
# * 'Starting'
# * 'Running'
# https://github.com/firecracker-microvm/firecracker/blob/8d369e5db565441987d607f3ff24dc15fa2c8d7a/src/api_server/swagger/firecracker.yaml#L471
# --------------------------------------------------------------------------
STATE_MAP = {
'Uninitialized' => 'FAILURE',
'Starting' => 'RUNNING',
'Running' => 'RUNNING'
}

MONITOR_KEYS = %w[cpu memory netrx nettx diskrdbytes diskwrbytes diskrdiops
diskwriops]

# Get the I/O stats of the domain as provided by the Firecracker metrics
# endpoint. The metrics are aggregated for all DISKS and NICs
def io_stats
@vm[:netrx] = 0
@vm[:nettx] = 0
@vm[:diskrdbytes] = 0
@vm[:diskwrbytes] = 0
@vm[:diskrdiops] = 0
@vm[:diskwriops] = 0

# Only running VMs have a live metrics FIFO to read
return if @vm[:state] != 'RUNNING'

vm_metrics = Firecracker.metrics(@name)

return if vm_metrics.nil? || vm_metrics.keys.empty?

@vm[:netrx] += vm_metrics['net']['rx_bytes_count']
@vm[:nettx] += vm_metrics['net']['tx_bytes_count']
@vm[:diskrdbytes] += vm_metrics['block']['read_bytes']
@vm[:diskwrbytes] += vm_metrics['block']['write_bytes']
@vm[:diskrdiops] += vm_metrics['block']['read_count']
@vm[:diskwriops] += vm_metrics['block']['write_count']
end

end
#-------------------------------------------------------------------------------
# This module provides a basic interface to get the list of domains in
# the system and convert the information to be added to monitor or system
# messages.
#
# It also gathers the state information of the domains for the state probe
#-------------------------------------------------------------------------------
module DomainList

    ############################################################################
    # Module Interface
    ############################################################################

    # Build the monitor message with metrics for every known microVM
    def self.info
        domains = FirecrackerDomains.new

        domains.info

        domains.to_monitor
    end

    # Build the state information for every microVM
    def self.state_info
        FirecrackerDomains.new.state_info
    end

    ############################################################################
    # This is the implementation class for the module logic
    ############################################################################
    class FirecrackerDomains

        include Firecracker
        include ProcessList

        def initialize
            @vms = {}
        end

        # Get the list of VMs (known to OpenNebula) and their monitor info
        # including process usage
        #
        # @return [Hash] with KVM Domain classes indexed by their uuid
        def info
            build_list(true) do |vm_name|
                domain = Domain.new vm_name

                domain.info == -1 ? nil : domain
            end
        end

        # Get the list of VMs and their info, not including process usage.
        #
        # @return [Hash] with KVM Domain classes indexed by their uuid
        def state_info
            build_list(false) do |vm_name|
                domain = Domain.new vm_name

                domain.info == -1 ? nil : domain
            end
        end

        # Render a monitor message string with one VM = [...] record
        # per microVM
        def to_monitor
            @vms.map do |_uuid, domain|
                "VM = [ ID=\"#{domain[:id]}\", UUID=\"#{domain[:uuid]}\"," \
                " MONITOR=\"#{domain.to_monitor}\"]\n"
            end.join
        end

        private

        # Generic build method for the domain list. Entries are filtered
        # and created by the given block; process usage is merged in when
        # requested.
        # @param [Boolean] with_process, to get process information
        def build_list(with_process)
            return unless block_given?

            process_usage = ProcessList.process_list if with_process

            names = ProcessList.retrieve_names

            return @vms if names.empty?

            names.each do |vm_name|
                domain = yield(vm_name)

                @vms[domain[:uuid]] = domain if domain
            end

            return @vms unless with_process

            process_usage.each do |uuid, usage|
                @vms[uuid].merge!(usage) if @vms[uuid]
            end

            @vms
        end

    end

end

View File

@ -122,7 +122,7 @@ module ProcessList
# param[Array] pids of the arrys to compute the CPU usage
# result[Array] array of cpu usage
def self.cpu_info(pids)
multiplier = `nproc`.to_i * 100
multiplier = Integer(`grep -c processor /proc/cpuinfo`) * 100
cpu_ini = {}

View File

@ -94,7 +94,7 @@ class VirtualMachineDB
last = @db.execute("SELECT MAX(timestamp) from #{@dataset}").flatten![0]
last ||= 0
return sync_status if time > (last + @conf[:sync])
return sync_status if time > (last + @conf[:sync]) && last != 0
status_str = ''
monitor_ids = []

View File

@ -314,7 +314,7 @@ void LifeCycleManager::deploy_success_action(int vid)
vmpool->update(vm);
}
else
else if ( vm->get_lcm_state() != VirtualMachine::RUNNING)
{
vm->log("LCM",Log::ERROR,"deploy_success_action, VM in a wrong state");
}

View File

@ -137,6 +137,21 @@ IM_MAD = [
]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Firecracker UDP-push Information Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
# -w Timeout in seconds to execute external commands (default unlimited)
#-------------------------------------------------------------------------------
IM_MAD = [
NAME = "firecracker",
SUNSTONE_NAME = "Firecracker",
EXECUTABLE = "one_im_ssh",
ARGUMENTS = "-r 3 -t 15 -w 90 firecracker",
THREADS = 0
]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# vCenter Information Driver Manager Configuration
# -r number of retries when monitoring a host

View File

@ -27,7 +27,7 @@ int VMRPCPool::update_monitoring(const VirtualMachineMonitorInfo& monitoring)
return 0;
}
auto sql_xml = db->escape_str(monitoring.to_xml_extended());
auto sql_xml = db->escape_str(monitoring.to_xml());
if (sql_xml == 0)
{

View File

@ -322,7 +322,7 @@ void HostMonitorManager::monitor_vm(int oid,
if (monitoring.from_template(tmpl) != 0)
{
string str;
NebulaLog::log("HMM", Log::ERROR, "Error parsing VM monitoring template: "
NebulaLog::log("HMM", Log::ERROR, "Error parsing VM monitoring: "
+ tmpl.to_str(str));
return;
}

View File

@ -184,8 +184,7 @@ module OpenNebula
# ["1337266088", "800"]]
# }
def monitoring(xpath_expressions)
return super(HOST_METHODS[:monitoring], 'HOST',
'LAST_MON_TIME', xpath_expressions)
return super(HOST_METHODS[:monitoring], xpath_expressions)
end
# Retrieves this Host's monitoring data from OpenNebula, in XML

View File

@ -94,8 +94,7 @@ module OpenNebula
# "HOST_SHARE/FREE_CPU"=>[["1337609673", "800"]],
# "HOST_SHARE/RUNNING_VMS"=>[["1337609673", "3"]]}}
def monitoring(xpath_expressions)
return super(HOST_POOL_METHODS[:monitoring],
'HOST', 'LAST_MON_TIME', xpath_expressions)
return super(HOST_POOL_METHODS[:monitoring], xpath_expressions)
end
# Retrieves the monitoring data for all the Hosts in the pool, in XML

View File

@ -91,9 +91,6 @@ module OpenNebula
# Retrieves the monitoring data for all the Objects in the pool
#
# @param [String] xml_method xml-rcp method
# @param [String] root_elem Root for each individual PoolElement
# @param [String] timestamp_elem Name of the XML element with the last
# monitorization timestamp
# @param [Array<String>] xpath_expressions Elements to retrieve.
# @param args arguemnts for the xml_method call
#
@ -101,8 +98,7 @@ module OpenNebula
# OpenNebula::Error] The first level hash uses the Object ID as keys,
# and as value a Hash with the requested xpath expressions,
# and an Array of 'timestamp, value'.
def monitoring(xml_method, root_elem, timestamp_elem, xpath_expressions,
*args)
def monitoring(xml_method, xpaths, *args)
rc = @client.call(xml_method, *args)
@ -125,9 +121,7 @@ module OpenNebula
end
ids.each { |id|
hash[id] = OpenNebula.process_monitoring(
xmldoc, root_elem, timestamp_elem, id, xpath_expressions)
hash[id] = OpenNebula.process_monitoring(xmldoc, id, xpaths)
}
return hash

View File

@ -183,15 +183,12 @@ module OpenNebula
# Retrieves this Element's monitoring data from OpenNebula
#
# @param [String] xml_method the name of the XML-RPC method
# @param [String] root_elem Root for each individual PoolElement
# @param [String] timestamp_elem Name of the XML element with the last
# monitorization timestamp
# @param xpath_expressions [Array<String>] Xpath expressions for the
# elements to retrieve.
# @param xpaths [Array<String>] Xpath expressions for the elements to
# retrieve.
#
# @return [Hash<String, Array<Array<int>>, OpenNebula::Error] Hash with
# the requested xpath expressions, and an Array of [timestamp, value].
def monitoring(xml_method, root_elem, timestamp_elem, xpath_expressions)
def monitoring(xml_method, xpaths)
return Error.new('ID not defined') if !@pe_id
rc = @client.call(xml_method, @pe_id)
@ -204,8 +201,7 @@ module OpenNebula
xmldoc.initialize_xml(rc, 'MONITORING_DATA')
return OpenNebula.process_monitoring(
xmldoc, root_elem, timestamp_elem, @pe_id, xpath_expressions)
return OpenNebula.process_monitoring(xmldoc, @pe_id, xpaths)
end
public
@ -261,21 +257,20 @@ module OpenNebula
# Processes the monitoring data in XML returned by OpenNebula
#
# @param [XMLElement] xmldoc monitoring data returned by OpenNebula
# @param [String] root_elem Root for each individual PoolElement
# @param [String] timestamp_elem Name of the XML element with the last
# monitorization timestamp
# @param [Integer] oid Id of the object to process
# @param [Array<String>] xpath_expressions Elements to retrieve.
#
# @return [Hash<String, Array<Array<int>>, OpenNebula::Error] Hash with
# the requested xpath expressions, and an Array of [timestamp, value].
def self.process_monitoring(xmldoc, root_elem, timestamp_elem, oid, xpath_expressions)
def self.process_monitoring(xmldoc, oid, xpath_expressions)
hash = {}
timestamps = xmldoc.retrieve_elements(
"#{root_elem}[ID=#{oid}]/#{timestamp_elem}")
"/MONITORING_DATA/MONITORING[ID=#{oid}]/TIMESTAMP")
xpath_expressions.each { |xpath|
xpath_values = xmldoc.retrieve_elements("#{root_elem}[ID=#{oid}]/#{xpath}")
xpath_values = xmldoc.retrieve_elements(
"/MONITORING_DATA/MONITORING[ID=#{oid}]/#{xpath}")
if ( xpath_values.nil? )
hash[xpath] = []
@ -287,27 +282,4 @@ module OpenNebula
return hash
end
# Alternative method with better performance for huge number of timestamps.
# For reasonable amounts of data, the current method is quicker
=begin
def self.process_monitoring(xmldoc, root_elem, timestamp_elem, oid, xpath_expressions)
hash = {}
xpath_expressions.each { |xpath|
hash[xpath] = []
}
xmldoc.each("#{root_elem}[ID=#{oid}]") do |elem|
timestamp = elem[timestamp_elem]
xpath_expressions.each { |xpath|
xpath_value = elem[xpath]
hash[xpath] << [timestamp, xpath_value] if !xpath_value.nil?
}
end
return hash
end
=end
end

View File

@ -570,8 +570,7 @@ module OpenNebula
# }
#
def monitoring(xpath_expressions)
return super(VM_METHODS[:monitoring], 'VM',
'LAST_POLL', xpath_expressions)
return super(VM_METHODS[:monitoring], xpath_expressions)
end
# Retrieves this VM's monitoring data from OpenNebula, in XML

View File

@ -191,9 +191,8 @@ module OpenNebula
# }
# }
#
def monitoring(xpath_expressions, filter_flag=INFO_ALL)
return super(VM_POOL_METHODS[:monitoring],
'VM', 'LAST_POLL', xpath_expressions, filter_flag)
def monitoring(xpaths, filter_flag=INFO_ALL)
return super(VM_POOL_METHODS[:monitoring], xpaths, filter_flag)
end
# Retrieves the monitoring data for all the VMs in the pool, in XML

View File

@ -76,21 +76,21 @@ define(function(require) {
data: {
id: this.element.ID,
monitor: {
monitor_resources : "HOST_SHARE/CPU_USAGE,MONITORING/CAPACITY/USED_CPU,HOST_SHARE/MAX_CPU,HOST_SHARE/TOTAL_CPU,HOST_SHARE/MEM_USAGE,MONITORING/CAPACITY/USED_MEMORY,HOST_SHARE/MAX_MEM,HOST_SHARE/TOTAL_MEM"
monitor_resources : "CAPACITY/USED_CPU,CAPACITY/FREE_CPU,CAPACITY/USED_MEMORY,CAPACITY/FREE_MEMORY"
}
},
success: function(req, response) {
var host_graphs = [
{
monitor_resources : "HOST_SHARE/CPU_USAGE,MONITORING/CAPACITY/USED_CPU,HOST_SHARE/MAX_CPU,HOST_SHARE/TOTAL_CPU",
labels : Locale.tr("Allocated") + "," + Locale.tr("Real") + "," + Locale.tr("Total") + "," + Locale.tr("Total +/- reserved"),
monitor_resources : "CAPACITY/USED_CPU,CAPACITY/FREE_CPU",
labels : Locale.tr("Used CPU") + "," + Locale.tr("Free CPU"),
humanize_figures : false,
div_graph : $("#host_cpu_graph"),
div_legend : $("#host_cpu_legend")
},
{
monitor_resources : "HOST_SHARE/MEM_USAGE,MONITORING/CAPACITY/USED_MEMORY,HOST_SHARE/MAX_MEM,HOST_SHARE/TOTAL_MEM",
labels : Locale.tr("Allocated") + "," + Locale.tr("Real") + "," + Locale.tr("Total") + "," + Locale.tr("Total +/- reserved"),
monitor_resources : "CAPACITY/USED_MEMORY,CAPACITY/FREE_MEMORY",
labels : Locale.tr("Used MEMORY") + "," + Locale.tr("Free MEMORY"),
humanize_figures : false,
humanize_figures : true,
div_graph : $("#host_mem_graph"),

View File

@ -445,40 +445,40 @@ define(function(require) {
timeout: true,
id: data.ID,
monitor: {
monitor_resources : "MONITORING/CPU,MONITORING/MEMORY,MONITORING/NETTX,MONITORING/NETRX"
monitor_resources : "CPU,MEMORY,NETTX,NETRX"
}
},
success: function(request, response){
var vm_graphs = [
{
monitor_resources : "MONITORING/CPU",
monitor_resources : "CPU",
labels : "Real CPU",
humanize_figures : false,
div_graph : $(".vm_cpu_provision_graph", context)
},
{
monitor_resources : "MONITORING/MEMORY",
monitor_resources : "MEMORY",
labels : "Real MEM",
humanize_figures : true,
div_graph : $(".vm_memory_provision_graph", context)
},
{
labels : "Network reception",
monitor_resources : "MONITORING/NETRX",
monitor_resources : "NETRX",
humanize_figures : true,
convert_from_bytes : true,
div_graph : $(".vm_net_rx_provision_graph", context)
},
{
labels : "Network transmission",
monitor_resources : "MONITORING/NETTX",
monitor_resources : "NETTX",
humanize_figures : true,
convert_from_bytes : true,
div_graph : $(".vm_net_tx_provision_graph", context)
},
{
labels : "Network reception speed",
monitor_resources : "MONITORING/NETRX",
monitor_resources : "NETRX",
humanize_figures : true,
convert_from_bytes : true,
y_sufix : "B/s",
@ -487,7 +487,7 @@ define(function(require) {
},
{
labels : "Network transmission speed",
monitor_resources : "MONITORING/NETTX",
monitor_resources : "NETTX",
humanize_figures : true,
convert_from_bytes : true,
y_sufix : "B/s",

View File

@ -100,7 +100,7 @@ define(function(require) {
objLeases.resource = "template";
objLeases.__proto__ = FormPanel.prototype;
Leases.actions(objLeases);
if(Config.isFeatureEnabled("instantiate_persistent")){
$("input.instantiate_pers", context).on("change", function(){
var persistent = $(this).prop("checked");
@ -125,7 +125,7 @@ define(function(require) {
$("#vm_n_times_disabled", context).hide();
$("#vm_n_times", context).show();
}
context.off("click", "#add_scheduling_inst_action");
context.on("click", "#add_scheduling_inst_action", function() {
var actions = ["terminate", "terminate-hard", "hold", "release", "stop", "suspend", "resume", "reboot", "reboot-hard", "poweroff", "poweroff-hard", "undeploy", "undeploy-hard", "snapshot-create"];
@ -134,7 +134,6 @@ define(function(require) {
ScheduleActions.setup(context);
return false;
});
context.off("click", "#add_inst_action_json");
context.on("click", "#add_inst_action_json", function(){
var sched_action = ScheduleActions.retrieveNewAction(context);
@ -144,12 +143,10 @@ define(function(require) {
return false;
});
context.on("focusout" , "#time_input", function(){
$("#time_input").removeAttr("data-invalid");
$("#time_input").removeAttr("class");
});
context.off("click", ".remove_action_x");
context.on("click", ".remove_action_x", function(){
$(this).parents("tr").remove();

View File

@ -112,39 +112,27 @@ define(function(require) {
data: {
id: that.element.ID,
monitor: {
monitor_resources : "MONITORING/CPU,TEMPLATE/CPU,MONITORING/MEMORY,TEMPLATE/MEMORY"
monitor_resources : "CPU,MEMORY"
}
},
success: function(req, response) {
var vmGraphs = [
{
monitor_resources : "TEMPLATE/CPU,MONITORING/CPU",
labels : Locale.tr("Allocated") + "," + Locale.tr("Real"),
monitor_resources : "CPU",
labels : Locale.tr("CPU usage"),
humanize_figures : false,
div_graph : $(".vm_cpu_graph", context),
div_legend : $(".vm_cpu_legend", context)
},
{
monitor_resources : "TEMPLATE/MEMORY,MONITORING/MEMORY",
labels : Locale.tr("Allocated") + "," + Locale.tr("Real"),
monitor_resources : "MEMORY",
labels : ("Memory usage"),
humanize_figures : true,
div_graph : $(".vm_memory_graph", context),
div_legend : $(".vm_memory_legend", context)
}
];
if(response.monitoring["TEMPLATE/CPU"] != undefined){
response.monitoring["TEMPLATE/CPU"].map(function(e){
e[1] = e[1] * 100;
});
}
if(response.monitoring["TEMPLATE/MEMORY"] != undefined){
response.monitoring["TEMPLATE/MEMORY"].map(function(e){
e[1] = e[1] * 1024;
});
}
for (var i = 0; i < vmGraphs.length; i++) {
Graphs.plot(response, vmGraphs[i]);
}

View File

@ -542,28 +542,28 @@ define(function(require) {
data: {
id: that.element.ID,
monitor: {
monitor_resources : "MONITORING/NETTX,MONITORING/NETRX"
monitor_resources : "NETTX,NETRX"
}
},
success: function(req, response) {
var vmGraphs = [
{
labels : Locale.tr("Network reception"),
monitor_resources : "MONITORING/NETRX",
monitor_resources : "NETRX",
humanize_figures : true,
convert_from_bytes : true,
div_graph : $("#vm_net_rx_graph")
},
{
labels : Locale.tr("Network transmission"),
monitor_resources : "MONITORING/NETTX",
monitor_resources : "NETTX",
humanize_figures : true,
convert_from_bytes : true,
div_graph : $("#vm_net_tx_graph")
},
{
labels : Locale.tr("Network reception speed"),
monitor_resources : "MONITORING/NETRX",
monitor_resources : "NETRX",
humanize_figures : true,
convert_from_bytes : true,
y_sufix : "B/s",
@ -572,7 +572,7 @@ define(function(require) {
},
{
labels : Locale.tr("Network transmission speed"),
monitor_resources : "MONITORING/NETTX",
monitor_resources : "NETTX",
humanize_figures : true,
convert_from_bytes : true,
y_sufix : "B/s",

View File

@ -746,14 +746,14 @@ define(function(require) {
data: {
id: that.element.ID,
monitor: {
monitor_resources : "MONITORING/DISKRDBYTES,MONITORING/DISKWRBYTES,MONITORING/DISKRDIOPS,MONITORING/DISKWRIOPS"
monitor_resources : "DISKRDBYTES,DISKWRBYTES,DISKRDIOPS,DISKWRIOPS"
}
},
success: function(req, response) {
var vmGraphs = [
{
labels : Locale.tr("Disk read bytes"),
monitor_resources : "MONITORING/DISKRDBYTES",
monitor_resources : "DISKRDBYTES",
humanize_figures : true,
convert_from_bytes : true,
derivative : true,
@ -761,7 +761,7 @@ define(function(require) {
},
{
labels : Locale.tr("Disk write bytes"),
monitor_resources : "MONITORING/DISKWRBYTES",
monitor_resources : "DISKWRBYTES",
humanize_figures : true,
convert_from_bytes : true,
derivative : true,
@ -769,7 +769,7 @@ define(function(require) {
},
{
labels : Locale.tr("Disk read IOPS"),
monitor_resources : "MONITORING/DISKRDIOPS",
monitor_resources : "DISKRDIOPS",
//humanize_figures : true,
//convert_from_bytes : true,
y_sufix : "IOPS/s",
@ -778,7 +778,7 @@ define(function(require) {
},
{
labels : Locale.tr("Disk write IOPS"),
monitor_resources : "MONITORING/DISKWRIOPS",
monitor_resources : "DISKWRIOPS",
//humanize_figures : true,
//convert_from_bytes : true,
y_sufix : "IOPS/s",

View File

@ -263,4 +263,4 @@ define(function(require) {
""
];
}
});
});

View File

@ -16,42 +16,24 @@
#include "VirtualMachineMonitorInfo.h"
#include "ObjectXML.h"
#include "Attribute.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
using namespace std;
#define xml_print(name, value) "<"#name">" << value << "</"#name">"
#define xml_print(name, value) "<"#name">" << one_util::escape_xml(value) \
<< "</"#name">"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
string VirtualMachineMonitorInfo::to_xml() const
{
string monitor_str;
string tmp;
return monitoring.to_xml(monitor_str);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
string VirtualMachineMonitorInfo::to_xml_extended() const
{
string monitor_str;
ostringstream oss;
oss << "<MONITORING>";
oss << xml_print(TIMESTAMP, _timestamp);
oss << xml_print(ID, _oid);
oss << monitoring.to_xml(monitor_str);
oss << "</MONITORING>";
// todo add Template (CPU and MEMORY)
return oss.str();
return monitoring.to_xml(tmp);
}
/* -------------------------------------------------------------------------- */
@ -73,9 +55,9 @@ string VirtualMachineMonitorInfo::to_xml_short() const
monitoring.get("STATE", state);
oss << "<MONITORING>"
<< "<CPU>" << one_util::escape_xml(cpu) << "</CPU>"
<< "<MEMORY>" << one_util::escape_xml(memory) << "</MEMORY>"
<< "<STATE>" << one_util::escape_xml(state) << "</STATE>"
<< xml_print(CPU, cpu)
<< xml_print(MEMORY, memory)
<< xml_print(STATE, state)
<< "</MONITORING>";
}
@ -87,26 +69,15 @@ string VirtualMachineMonitorInfo::to_xml_short() const
int VirtualMachineMonitorInfo::from_xml(const std::string& xml_string)
{
ObjectXML xml(xml_string);
int rc = xml.xpath(_timestamp, "/MONITORING/TIMESTAMP", 0L);
rc += xml.xpath(_oid, "/MONITORING/ID", -1);
int rc = monitoring.from_xml(xml_string);
if (rc < 0)
{
return -1;
}
vector<xmlNodePtr> content;
xml.get_nodes("/MONITORING/MONITORING", content);
if (!content.empty())
{
monitoring.from_xml_node(content[0]);
xml.free_nodes(content);
content.clear();
}
return monitoring.get("TIMESTAMP", _timestamp) &&
monitoring.get("ID", _oid);
return 0;
}
@ -116,19 +87,11 @@ int VirtualMachineMonitorInfo::from_xml(const std::string& xml_string)
int VirtualMachineMonitorInfo::from_template(const Template &tmpl)
{
int tmp;
if (tmpl.get("OID", tmp))
{
_oid = tmp;
}
if (_oid < 0)
{
return -1;
}
monitoring.merge(&tmpl);
monitoring.replace("ID", _oid);
monitoring.replace("TIMESTAMP", _timestamp);
return 0;
}
@ -139,7 +102,13 @@ void VirtualMachineMonitorInfo::reset_info()
{
_timestamp = time(0);
monitoring.clear();
monitoring.replace("CPU","0.0");
monitoring.replace("MEMORY","0");
}
monitoring.replace("TIMESTAMP", _timestamp);
monitoring.replace("ID", _oid);
}

View File

@ -35,6 +35,7 @@ xml = STDIN.read
microvm = MicroVM.new_from_xml(xml, nil)
microvm.gen_deployment_file
microvm.gen_logs_files
# Create microVM
rc = microvm.create

View File

@ -48,8 +48,10 @@ rm -rf "$ROOTFS_PATH/dev"
rm -f "$ROOTFS_PATH/api.socket"
rm -f "$ROOTFS_PATH/firecracker"
# Unmount VM directory
umount "$ROOTFS_PATH"
# Unmount VM directory (if needed)
if grep -qs "$ROOTFS_PATH" /proc/mounts; then
umount "$ROOTFS_PATH"
fi
#-------------------------------------------------------------------------------
# Wait for a cgroup to not being used

View File

@ -32,7 +32,7 @@ class FirecrackerClient
'Accept' => 'application/json',
'Content-Type' => 'application/json'
}.freeze
API_RETRY = 5 # Attempts, in case a response is failed to read from LXD
API_RETRY = 5 # Attempts, in case a response is failed to read from FC
def initialize(socket_path)
# rubocop:disable Style/RescueStandardError
@ -79,19 +79,6 @@ class FirecrackerClient
get_response(Net::HTTP::Patch.new("#{API}/#{uri}", HEADER), data)
end
# Waits for an operation returned in response to be completed
# def wait(response, timeout)
# operation_id = response['operation'].split('/').last
#
# timeout = "?timeout=#{timeout}" unless [nil, ''].include?(timeout)
#
# response = get("operations/#{operation_id}/wait#{timeout}")
#
# raise LXDError, response if response['metadata']['status'] == 'Failure'
#
# response
# end
private
# Enable communication with Firecracker via unix socket
@ -142,7 +129,7 @@ class FirecrackerClient
end
# Error used for raising LXDClient exception when response is error return value
# Error used for raising FC Client exception when response is error return value
class FirecrackerError < StandardError
attr_reader :body, :error, :code, :type

View File

@ -0,0 +1,85 @@
#!/bin/bash

# Simple conversion script of ISO 9660 image into image based
# on ext2/3/4 filesystem. Completely unprivileged, without needing
# to mount both images. Free to use under Apache 2.0 License.
# 2020, Vlastimil Holer <vlastimil.holer@gmail.com>
#
# Usage: $0 <source image> [<dest. image>]
# Env. overrides: TYPE  - target filesystem type (default: ext4)
#                 LABEL - filesystem label (default: CONTEXT)

export PATH=/usr/sbin:$PATH

# abort on any failed command, also inside pipelines
set -e -o pipefail

TYPE=${TYPE:-ext4}
LABEL=${LABEL:-CONTEXT}

DISK_SRC=$1
DISK_DST=${2:-${DISK_SRC}.${TYPE}}

# validate arguments: a readable source image is mandatory
if [ -z "${DISK_SRC}" ]; then
    echo "SYNTAX: $0 <source image> [<dest. image>]" >&2
    exit 1
fi

if ! [ -f "${DISK_SRC}" ]; then
    echo "ERROR: File '${DISK_SRC}' not found" >&2
    exit 1
fi

###

# working files: extraction dir, new fs image and debugfs command
# script; private to the user and removed on any exit path
NEW_EXTR=$(mktemp "${DISK_SRC}.XXXXXX" -d)
NEW_DISK=$(mktemp "${DISK_SRC}.XXXXXX")
NEW_DBFS=$(mktemp "${DISK_SRC}.XXXXXX")
chmod go-rwx "${NEW_EXTR}" "${NEW_DISK}" "${NEW_DBFS}"
trap 'rm -rf "${NEW_EXTR}" "${NEW_DISK}" "${NEW_DBFS}"' EXIT TERM INT HUP

# queue a single command into the debugfs script
debugfs_do()
{
    echo "${1}" >> "${NEW_DBFS}"
}

###

# unpack ISO file (bsdtar reads ISO 9660 without mounting it)
bsdtar -xf "${DISK_SRC}" -C "${NEW_EXTR}"
find "${NEW_EXTR}" -mindepth 1 -exec chmod u+w,go+r {} \;

# create image with extX filesystem, sized to the extracted content
# plus ~1000 KiB of slack
NEW_SIZE=$(du -sk "${NEW_EXTR}" 2>/dev/null | cut -f1)
dd if=/dev/zero of="${NEW_DISK}" bs=1024 seek="$((NEW_SIZE + 1000))" count=1 status=none
mkfs -F -q -t "${TYPE}" -L "${LABEL}" "${NEW_DISK}"
# disable periodic fsck (-c0 -i0) and root-reserved blocks (-r0)
tune2fs -c0 -i0 -r0 "${NEW_DISK}" >/dev/null

# generate debugfs script recreating the extracted tree inside the
# image; only regular files and directories are supported
while IFS= read -r -d $'\0' F; do
    REL_F=${F#${NEW_EXTR}/}
    DN=$(dirname "${REL_F}")
    BN=$(basename "${REL_F}")

    debugfs_do "cd /${DN}"

    if [ -f "${F}" ]; then
        debugfs_do "write ${F} ${BN}"
    elif [ -d "${F}" ]; then
        debugfs_do "mkdir ${BN}"
    else
        echo "ERROR: Unsupported file type of '${REL_F}'" >&2
        exit 1
    fi
done < <(find "${NEW_EXTR}" -mindepth 1 -print0)

# run debugfs and apply prepared script
OUT=$(debugfs -w "${NEW_DISK}" -f "${NEW_DBFS}" 2>&1 >/dev/null)

# error in debugfs run is mainly detected by nearly empty output,
# which has only debugfs welcome banner and no other messages
if [ "$(echo "${OUT}" | wc -l)" -ne 1 ] || ! [[ "${OUT}" =~ ^debugfs ]]; then
    echo "ERROR: Failed to convert ISO into ${TYPE} image due to error:" >&2
    echo "${OUT}" >&2
    exit 1
fi

# success!
mv -f "${NEW_DISK}" "${DISK_DST}"
exit 0

View File

@ -1,79 +0,0 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
function clean () {
# Clean temporary directories
umount "$MAP_PATH/fs"
rm -rf "$MAP_PATH"
}
# exit when any command fails
set -eE
SYSDS_PATH=""
ROOTFS_ID=""
VM_ID=""
while getopts ":s:c:r:v:" opt; do
case $opt in
s) SYSDS_PATH="$OPTARG" ;;
c) CONTEXT_ID=$OPTARG ;;
r) ROOTFS_ID="$OPTARG" ;;
v) VM_ID="$OPTARG" ;;
esac
done
shift $(($OPTIND - 1))
if [ -z "$SYSDS_PATH" ] || [ -z "$CONTEXT_ID" ] || [ -z "$ROOTFS_ID" ] || [ -z "$VM_ID" ]; then
exit -1
fi
rgx_num="^[0-9]+$"
if ! [[ $CONTEXT_ID =~ $rgx_num ]] || ! [[ $ROOTFS_ID =~ $rgx_num ]] || ! [[ $VM_ID =~ $rgx_num ]]; then
exit -1
fi
VM_LOCATION="$SYSDS_PATH/$VM_ID"
MAP_PATH="$VM_LOCATION/map_context"
CONTEXT_PATH="$VM_LOCATION/disk.$CONTEXT_ID"
ROOTFS_PATH="$VM_LOCATION/disk.$ROOTFS_ID"
trap clean ERR
# Create temporary directories
mkdir "$MAP_PATH"
mkdir "$MAP_PATH/context"
mkdir "$MAP_PATH/fs"
# Mount rootfs
mount "$ROOTFS_PATH" "$MAP_PATH/fs"
# Create /context directory inside rootfs
if [ ! -d "$MAP_PATH/fs/context" ]; then
mkdir "$MAP_PATH/fs/context"
fi
# Move the context disk info into the microVM fs
bsdtar -xf "$CONTEXT_PATH" -C "$MAP_PATH/fs/context"
clean
exit 0

View File

@ -24,6 +24,14 @@ require 'opennebula_vm'
# This class interacts with Firecracker
class MicroVM
#---------------------------------------------------------------------------
# List of commands executed by the driver.
#---------------------------------------------------------------------------
COMMANDS = {
:clean => 'sudo /var/tmp/one/vmm/firecracker/clean_fc',
:map_context => '/var/tmp/one/vmm/firecracker/map_context'
}
# rubocop:disable Naming/AccessorMethodName
# rubocop:disable Layout/LineLength
@ -48,9 +56,6 @@ class MicroVM
@rootfs_dir = "/srv/jailer/firecracker/#{@one.vm_name}/root"
@context_path = "#{@rootfs_dir}/context"
@map_context_sh = '/var/tmp/one/vmm/firecracker/map_context.sh'
@clean_sh = '/var/tmp/one/vmm/firecracker/clean.sh'
end
class << self
@ -74,6 +79,14 @@ class MicroVM
end
end
def gen_logs_files
path_log = "#{vm_location}/#{@fc['deployment-file']['logger']['log_fifo']}"
path_metrics = "#{vm_location}/#{@fc['deployment-file']['logger']['metrics_fifo']}"
File.open(path_log, 'w') {}
File.open(path_metrics, 'w') {}
end
def vm_location
"#{@one.sysds_path}/#{@one.vm_id}"
end
@ -88,8 +101,9 @@ class MicroVM
end
def get_pid
rc, stdout, = Command.execute("ps auxwww | grep '^.*firecracker.*\\<#{@one.vm_name}\\>' | grep -v grep",
false)
rc, stdout, = Command.execute("ps auxwww | grep " \
"\"^.*firecracker.*--id['\\\"=[[:space:]]]*#{@one.vm_name}\" " \
"| grep -v grep", false)
if !rc.zero? || stdout.nil?
return -1
@ -106,11 +120,11 @@ class MicroVM
return 0 unless context['context'] # return if there is no context
context_id = context['context']['disk_id']
context_location = context['context']['source']
params = " -s #{@one.sysds_path} -c #{context_id} -r #{@one.rootfs_id} -v #{@one.vm_id}"
params = " #{context_location} #{context_location}"
cmd = "sudo #{@map_context_sh} #{params}"
cmd = "#{COMMANDS[:map_context]} #{params}"
Command.execute_rc_log(cmd, false)
end
@ -179,11 +193,8 @@ class MicroVM
# TODO: make screen oprions configurable to support different versions
# TODO: make screen configurable to enable use of tmux etc..
# TODO: make log file from screen configurable (not working on CentOS 7)
if @one.vnc?
cmd << 'screen -L'
cmd << " -Logfile /tmp/fc-log-#{@one.vm_id}"
cmd << " -dmS #{@one.vm_name} "
cmd << "screen -dmS #{@one.vm_name} "
end
# Build jailer command params
@ -234,7 +245,7 @@ class MicroVM
params = "-c #{cgroup_path} -v #{@one.vm_name} -t #{timeout}"
params << ' -o' unless delete
cmd = "sudo #{@clean_sh} #{params}"
cmd = "sudo #{COMMANDS[:clean]} #{params}"
Command.execute_rc_log(cmd, false)
end

View File

@ -111,6 +111,11 @@ class OpenNebulaVM
fc['deployment-file']['network-interfaces'] = []
fc['command-params'] = {}
# Set logger info
fc['deployment-file']['logger'] = {}
fc['deployment-file']['logger']['log_fifo'] = 'logs.fifo'
fc['deployment-file']['logger']['metrics_fifo'] = 'metrics.fifo'
boot_source(fc['deployment-file']['boot-source'])
drives(fc['deployment-file']['drives'])
machine_config(fc['deployment-file']['machine-config'])