#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

#-------------------------------------------------------------------------------
# Install program for OpenNebula. It will install it relative to
# $ONE_LOCATION if defined with the -d option, otherwise it will be installed
# under /. In this case you may specify the oneadmin user/group, so you do
# not need to run the OpenNebula daemon with root privileges
#-------------------------------------------------------------------------------
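
# Illustrative invocations (examples only; the full option list is documented
# in usage() below):
#
#   ./install.sh -u oneadmin -g oneadmin     # system-wide install under /
#   ./install.sh -d $ONE_LOCATION -l         # self-contained install, symlinked for development
#   ./install.sh -d $ONE_LOCATION -k         # reinstall keeping existing configuration files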
#-------------------------------------------------------------------------------
# COMMAND LINE PARSING
#-------------------------------------------------------------------------------
usage() {
 echo
 echo "Usage: install.sh [-u install_user] [-g install_group] [-k keep conf]"
 echo "                  [-d ONE_LOCATION] [-c cli|ec2] [-r]"
 echo "                  [-s] [-p] [-F] [-P] [-G] [-6] [-f] [-l] [-e] [-h]"
 echo
 echo "-u: user that will run opennebula, defaults to user executing install.sh"
 echo "-g: group of the user that will run opennebula, defaults to user"
 echo "    executing install.sh"
 echo "-k: keep configuration files of existing OpenNebula installation, useful"
 echo "    when upgrading. This flag should not be set when installing"
 echo "    OpenNebula for the first time"
 echo "-d: target installation directory, if not defined it'd be root. Must be"
 echo "    an absolute path."
 echo "-c: install client utilities: OpenNebula cli and ec2 client files"
 echo "-s: install OpenNebula Sunstone"
 echo "-p: do not install OpenNebula Sunstone non-minified files"
 echo "-F: install OpenNebula FireEdge"
 echo "-P: do not install OpenNebula FireEdge non-minified files"
 echo "-G: install only OpenNebula Gate"
 echo "-6: install only OpenNebula Gate Proxy"
 echo "-f: install only OpenNebula Flow"
 echo "-r: remove OpenNebula, only useful if -d was not specified, otherwise"
 echo "    rm -rf \$ONE_LOCATION would do the job"
 echo "-l: creates symlinks instead of copying files, useful for development"
 echo "-e: install OpenNebula docker machine driver"
 echo "-h: prints this help"
}
#-------------------------------------------------------------------------------

PARAMETERS=":u:g:d:ehkrlcspFPorlfG6"

INSTALL_ETC="yes"
UNINSTALL="no"
LINK="no"
CLIENT="no"
ONEGATE="no"
ONEGATE_PROXY="no"
SUNSTONE="no"
SUNSTONE_DEV="yes"
FIREEDGE="no"
FIREEDGE_DEV="yes"
ONEFLOW="no"
ONEADMIN_USER=`id -u`
ONEADMIN_GROUP=`id -g`
SRC_DIR=$PWD
DOCKER_MACHINE="no"

while getopts $PARAMETERS opt; do
    case $opt in
        e) DOCKER_MACHINE="yes" ;;
        h) usage; exit 0 ;;
        k) INSTALL_ETC="no" ;;
        r) UNINSTALL="yes" ;;
        l) LINK="yes" ;;
        c) CLIENT="yes"; INSTALL_ETC="no" ;;
        G) ONEGATE="yes" ;;
        6) ONEGATE_PROXY="yes" ;;
        s) SUNSTONE="yes" ;;
        p) SUNSTONE_DEV="no" ;;
        F) FIREEDGE="yes" ;;
        P) FIREEDGE_DEV="no" ;;
        f) ONEFLOW="yes" ;;
        u) ONEADMIN_USER="$OPTARG" ;;
        g) ONEADMIN_GROUP="$OPTARG" ;;
        d) ROOT="$OPTARG" ;;
        \?) usage; exit 1 ;;
    esac
done

shift $(($OPTIND - 1))

#-------------------------------------------------------------------------------
# Definition of locations
#-------------------------------------------------------------------------------

CONF_LOCATION="$HOME/.one"
if [ -z "$ROOT" ] ; then
    BIN_LOCATION="/usr/bin"
    LIB_LOCATION="/usr/lib/one"
    SBIN_LOCATION="/usr/sbin"
    ETC_LOCATION="/etc/one"
    LOG_LOCATION="/var/log/one"
    VAR_LOCATION="/var/lib/one"
    ONEGATE_LOCATION="$LIB_LOCATION/onegate"
    ONEGATE_PROXY_LOCATION="$LIB_LOCATION/onegate-proxy"
    SUNSTONE_LOCATION="$LIB_LOCATION/sunstone"
    FIREEDGE_LOCATION="$LIB_LOCATION/fireedge"
    ONEFLOW_LOCATION="$LIB_LOCATION/oneflow"
    ONEHEM_LOCATION="$LIB_LOCATION/onehem"
    SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0"
    DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1"
    RUN_LOCATION="/var/run/one"
    LOCK_LOCATION="/var/lock/one"
    INCLUDE_LOCATION="/usr/include"
    SHARE_LOCATION="/usr/share/one"
    MAN_LOCATION="/usr/share/man/man1"
    VM_LOCATION="/var/lib/one/vms"
    DOCS_LOCATION="/usr/share/doc/one"
    SUNSTONE_MAIN_JS_LOCATION="$VAR_LOCATION/sunstone"
    DOCKER_MACHINE_LOCATION="src/docker_machine/src/docker_machine/bin/docker-machine-driver-opennebula"

    if [ "$CLIENT" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION"
        DELETE_DIRS=""
        CHOWN_DIRS=""
    elif [ "$SUNSTONE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $SUNSTONE_LOCATION $ETC_LOCATION $SUNSTONE_MAIN_JS_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS=""
    elif [ "$FIREEDGE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $ETC_LOCATION $FIREEDGE_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS=""
    elif [ "$ONEGATE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $ONEGATE_LOCATION $ETC_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS=""
    elif [ "$ONEGATE_PROXY" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $ONEGATE_PROXY_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS=""
    elif [ "$ONEFLOW" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION $ONEFLOW_LOCATION \
                   $ETC_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS=""
    elif [ "$DOCKER_MACHINE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS=""
    else
        MAKE_DIRS="$BIN_LOCATION $SBIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
                   $INCLUDE_LOCATION $SHARE_LOCATION $DOCS_LOCATION \
                   $LOG_LOCATION $RUN_LOCATION $LOCK_LOCATION \
                   $SYSTEM_DS_LOCATION $DEFAULT_DS_LOCATION $MAN_LOCATION \
                   $VM_LOCATION $ONEGATE_LOCATION $ONEGATE_PROXY_LOCATION $ONEFLOW_LOCATION \
                   $SUNSTONE_MAIN_JS_LOCATION $ONEHEM_LOCATION"
        DELETE_DIRS="$LIB_LOCATION $ETC_LOCATION $LOG_LOCATION $VAR_LOCATION \
                     $RUN_LOCATION $SHARE_DIRS"
        CHOWN_DIRS="$LOG_LOCATION $VAR_LOCATION $RUN_LOCATION $LOCK_LOCATION"
    fi

else
    BIN_LOCATION="$ROOT/bin"
    SBIN_LOCATION="$ROOT/sbin"
    LIB_LOCATION="$ROOT/lib"
    ETC_LOCATION="$ROOT/etc"
    VAR_LOCATION="$ROOT/var"
    RUN_LOCATION="$VAR_LOCATION/run"
    LOCK_LOCATION="$VAR_LOCATION/lock"
    ONEGATE_LOCATION="$LIB_LOCATION/onegate"
    ONEGATE_PROXY_LOCATION="$LIB_LOCATION/onegate-proxy"
    SUNSTONE_LOCATION="$LIB_LOCATION/sunstone"
    FIREEDGE_LOCATION="$LIB_LOCATION/fireedge"
    ONEFLOW_LOCATION="$LIB_LOCATION/oneflow"
    ONEHEM_LOCATION="$LIB_LOCATION/onehem"
    SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0"
    DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1"
    INCLUDE_LOCATION="$ROOT/include"
    SHARE_LOCATION="$ROOT/share"
    MAN_LOCATION="$ROOT/share/man/man1"
    VM_LOCATION="$VAR_LOCATION/vms"
    DOCS_LOCATION="$ROOT/share/doc"
    SUNSTONE_MAIN_JS_LOCATION="$VAR_LOCATION/sunstone"
    DOCKER_MACHINE_LOCATION="src/docker_machine/src/docker_machine/bin/docker-machine-driver-opennebula"

    if [ "$CLIENT" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    elif [ "$ONEGATE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $ONEGATE_LOCATION $ETC_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    elif [ "$ONEGATE_PROXY" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $ONEGATE_PROXY_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    elif [ "$SUNSTONE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $SUNSTONE_LOCATION $ETC_LOCATION $SUNSTONE_MAIN_JS_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    elif [ "$FIREEDGE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
                   $FIREEDGE_LOCATION $ETC_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    elif [ "$ONEFLOW" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION $ONEFLOW_LOCATION \
                   $ETC_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    elif [ "$DOCKER_MACHINE" = "yes" ] ; then
        MAKE_DIRS="$BIN_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
    else
        MAKE_DIRS="$BIN_LOCATION $SBIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
                   $INCLUDE_LOCATION $SHARE_LOCATION $SYSTEM_DS_LOCATION \
                   $DEFAULT_DS_LOCATION $MAN_LOCATION $DOCS_LOCATION \
                   $VM_LOCATION $ONEGATE_LOCATION $ONEGATE_PROXY_LOCATION $ONEFLOW_LOCATION \
                   $SUNSTONE_MAIN_JS_LOCATION $ONEHEM_LOCATION $LOCK_LOCATION $RUN_LOCATION"
        DELETE_DIRS="$MAKE_DIRS"
        CHOWN_DIRS="$ROOT"
    fi

    CHOWN_DIRS="$ROOT"
fi
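
# The *_LOCATION variables and the MAKE_DIRS/DELETE_DIRS/CHOWN_DIRS lists built
# above drive the directory creation, removal and ownership steps performed
# later in this script. Conceptually (simplified sketch, not the literal code
# used further down):
#
#   for d in $MAKE_DIRS;  do mkdir -p "$d"; done
#   for d in $CHOWN_DIRS; do chown -R $ONEADMIN_USER:$ONEADMIN_GROUP "$d"; done
#   # and, with -r, the uninstall path removes the DELETE_DIRS entries instead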

SHARE_DIRS="$SHARE_LOCATION/examples \
            $SHARE_LOCATION/examples/external_scheduler \
            $SHARE_LOCATION/examples/host_hooks \
            $SHARE_LOCATION/examples/network_hooks \
            $SHARE_LOCATION/websockify \
            $SHARE_LOCATION/websockify/websockify \
            $SHARE_LOCATION/oneprovision \
            $SHARE_LOCATION/dockerhub \
            $SHARE_LOCATION/dockerhub/dockerfiles \
            $SHARE_LOCATION/schemas \
            $SHARE_LOCATION/schemas/libvirt \
            $SHARE_LOCATION/schemas/xsd \
            $SHARE_LOCATION/ssh \
            $SHARE_LOCATION/start-scripts \
            $SHARE_LOCATION/conf \
            $SHARE_LOCATION/context \
            $SHARE_LOCATION/onecfg \
            $SHARE_LOCATION/onecfg/etc"

ETC_DIRS="$ETC_LOCATION/vmm_exec \
          $ETC_LOCATION/hm \
          $ETC_LOCATION/auth \
          $ETC_LOCATION/auth/certificates \
          $ETC_LOCATION/sunstone-views \
          $ETC_LOCATION/cli \
          $ETC_LOCATION/sunstone-views/kvm \
          $ETC_LOCATION/sunstone-views/vcenter \
          $ETC_LOCATION/sunstone-views/mixed \
          $ETC_LOCATION/fireedge \
          $ETC_LOCATION/fireedge/provision \
          $ETC_LOCATION/fireedge/provision/providers.d \
          $ETC_LOCATION/fireedge/provision/providers.d-extra \
          $ETC_LOCATION/fireedge/sunstone \
          $ETC_LOCATION/fireedge/sunstone/admin \
          $ETC_LOCATION/fireedge/sunstone/user \
          $ETC_LOCATION/fireedge/sunstone/groupadmin \
          $ETC_LOCATION/fireedge/sunstone/cloud"

LIB_DIRS="$LIB_LOCATION/ruby \
          $LIB_LOCATION/ruby/opennebula \
          $LIB_LOCATION/ruby/opennebula/flow \
          $LIB_LOCATION/ruby/cloud/ \
          $LIB_LOCATION/ruby/cloud/CloudAuth \
          $LIB_LOCATION/ruby/onedb \
          $LIB_LOCATION/ruby/onedb/shared \
          $LIB_LOCATION/ruby/onedb/local \
          $LIB_LOCATION/ruby/onedb/patches \
          $LIB_LOCATION/ruby/vendors \
          $LIB_LOCATION/mads \
          $LIB_LOCATION/sh \
          $LIB_LOCATION/sh/override \
          $LIB_LOCATION/ruby/cli \
          $LIB_LOCATION/ruby/cli/one_helper \
          $LIB_LOCATION/ruby/vcenter_driver \
          $LIB_LOCATION/ruby/nsx_driver \
          $LIB_LOCATION/oneprovision/lib \
          $LIB_LOCATION/oneprovision/provider_apis \
          $LIB_LOCATION/oneprovision/provider_apis/vultr \
          $LIB_LOCATION/oneprovision/lib/terraform \
          $LIB_LOCATION/oneprovision/lib/terraform/providers \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates/aws \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates/google \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates/digitalocean \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates/equinix \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates/vultr_metal \
          $LIB_LOCATION/oneprovision/lib/terraform/providers/templates/vultr_virtual \
          $LIB_LOCATION/oneprovision/lib/provision \
          $LIB_LOCATION/oneprovision/lib/provider \
          $LIB_LOCATION/oneprovision/lib/provision/resources \
          $LIB_LOCATION/oneprovision/lib/provision/resources/virtual \
          $LIB_LOCATION/oneprovision/lib/provision/resources/physical \
          $LIB_LOCATION/onecfg/lib \
          $LIB_LOCATION/onecfg/lib/common \
          $LIB_LOCATION/onecfg/lib/common/helpers \
          $LIB_LOCATION/onecfg/lib/common/logger \
          $LIB_LOCATION/onecfg/lib/config \
          $LIB_LOCATION/onecfg/lib/config/type \
          $LIB_LOCATION/onecfg/lib/config/type/augeas \
          $LIB_LOCATION/onecfg/lib/config/type/yaml \
          $LIB_LOCATION/onecfg/lib/patch"

VAR_DIRS="$VAR_LOCATION/remotes \
            $VAR_LOCATION/remotes/etc \
            $VAR_LOCATION/remotes/etc/tm/fs_lvm \
            $VAR_LOCATION/remotes/etc/tm/ssh \
            $VAR_LOCATION/remotes/etc/datastore/fs \
            $VAR_LOCATION/remotes/etc/datastore/ceph \
            $VAR_LOCATION/remotes/etc/im/kvm-probes.d \
            $VAR_LOCATION/remotes/etc/im/qemu-probes.d \
            $VAR_LOCATION/remotes/etc/im/lxd-probes.d \
            $VAR_LOCATION/remotes/etc/im/lxc-probes.d \
            $VAR_LOCATION/remotes/etc/im/firecracker-probes.d \
            $VAR_LOCATION/remotes/etc/market/http \
            $VAR_LOCATION/remotes/etc/vmm/kvm \
            $VAR_LOCATION/remotes/etc/vmm/lxd \
            $VAR_LOCATION/remotes/etc/vmm/lxc \
            $VAR_LOCATION/remotes/etc/vmm/lxc/profiles \
            $VAR_LOCATION/remotes/etc/vmm/firecracker \
            $VAR_LOCATION/remotes/etc/vmm/vcenter \
            $VAR_LOCATION/remotes/etc/vnm \
            $VAR_LOCATION/remotes/im \
            $VAR_LOCATION/remotes/im/lib \
            $VAR_LOCATION/remotes/im/kvm.d \
            $VAR_LOCATION/remotes/im/kvm-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/kvm-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/kvm-probes.d/host/system \
            $VAR_LOCATION/remotes/im/kvm-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/kvm-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/kvm-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/qemu.d \
            $VAR_LOCATION/remotes/im/qemu-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/qemu-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/qemu-probes.d/host/system \
            $VAR_LOCATION/remotes/im/qemu-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/qemu-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/qemu-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/dummy.d \
            $VAR_LOCATION/remotes/im/dummy-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/dummy-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/dummy-probes.d/host/system \
            $VAR_LOCATION/remotes/im/dummy-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/dummy-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/dummy-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/lxd.d \
            $VAR_LOCATION/remotes/im/lxd-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/lxd-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/lxd-probes.d/host/system \
            $VAR_LOCATION/remotes/im/lxd-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/lxd-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/lxd-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/lxc.d \
            $VAR_LOCATION/remotes/im/lxc-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/lxc-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/lxc-probes.d/host/system \
            $VAR_LOCATION/remotes/im/lxc-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/lxc-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/lxc-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/firecracker.d \
            $VAR_LOCATION/remotes/im/firecracker-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/firecracker-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/firecracker-probes.d/host/system \
            $VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/firecracker-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/vcenter.d \
            $VAR_LOCATION/remotes/im/ec2.d \
            $VAR_LOCATION/remotes/im/ec2-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/ec2-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/ec2-probes.d/host/system \
            $VAR_LOCATION/remotes/im/ec2-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/ec2-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/ec2-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/az.d \
            $VAR_LOCATION/remotes/im/az-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/az-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/az-probes.d/host/system \
            $VAR_LOCATION/remotes/im/az-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/az-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/az-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/one.d \
            $VAR_LOCATION/remotes/im/one-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/one-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/one-probes.d/host/system \
            $VAR_LOCATION/remotes/im/one-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/one-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/one-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/im/equinix.d \
            $VAR_LOCATION/remotes/im/equinix-probes.d/host/beacon \
            $VAR_LOCATION/remotes/im/equinix-probes.d/host/monitor \
            $VAR_LOCATION/remotes/im/equinix-probes.d/host/system \
            $VAR_LOCATION/remotes/im/equinix-probes.d/vm/monitor \
            $VAR_LOCATION/remotes/im/equinix-probes.d/vm/status \
            $VAR_LOCATION/remotes/im/equinix-probes.d/vm/snapshot \
            $VAR_LOCATION/remotes/vmm \
            $VAR_LOCATION/remotes/vmm/lib \
            $VAR_LOCATION/remotes/vmm/kvm \
            $VAR_LOCATION/remotes/vmm/vcenter \
            $VAR_LOCATION/remotes/vmm/ec2 \
            $VAR_LOCATION/remotes/vmm/az \
            $VAR_LOCATION/remotes/vmm/one \
            $VAR_LOCATION/remotes/vmm/lxd \
            $VAR_LOCATION/remotes/vmm/lxc \
            $VAR_LOCATION/remotes/vmm/equinix \
            $VAR_LOCATION/remotes/vmm/firecracker \
            $VAR_LOCATION/remotes/vnm \
            $VAR_LOCATION/remotes/vnm/802.1Q \
            $VAR_LOCATION/remotes/vnm/802.1Q/pre.d \
            $VAR_LOCATION/remotes/vnm/802.1Q/post.d \
            $VAR_LOCATION/remotes/vnm/802.1Q/clean.d \
            $VAR_LOCATION/remotes/vnm/vxlan \
            $VAR_LOCATION/remotes/vnm/vxlan/pre.d \
            $VAR_LOCATION/remotes/vnm/vxlan/post.d \
            $VAR_LOCATION/remotes/vnm/vxlan/clean.d \
            $VAR_LOCATION/remotes/vnm/dummy \
            $VAR_LOCATION/remotes/vnm/dummy/pre.d \
            $VAR_LOCATION/remotes/vnm/dummy/post.d \
            $VAR_LOCATION/remotes/vnm/dummy/clean.d \
            $VAR_LOCATION/remotes/vnm/bridge \
            $VAR_LOCATION/remotes/vnm/bridge/pre.d \
            $VAR_LOCATION/remotes/vnm/bridge/post.d \
            $VAR_LOCATION/remotes/vnm/bridge/clean.d \
            $VAR_LOCATION/remotes/vnm/ebtables \
            $VAR_LOCATION/remotes/vnm/ebtables/pre.d \
            $VAR_LOCATION/remotes/vnm/ebtables/post.d \
            $VAR_LOCATION/remotes/vnm/ebtables/clean.d \
            $VAR_LOCATION/remotes/vnm/fw \
            $VAR_LOCATION/remotes/vnm/fw/pre.d \
            $VAR_LOCATION/remotes/vnm/fw/post.d \
            $VAR_LOCATION/remotes/vnm/fw/clean.d \
            $VAR_LOCATION/remotes/vnm/ovswitch \
            $VAR_LOCATION/remotes/vnm/ovswitch/pre.d \
            $VAR_LOCATION/remotes/vnm/ovswitch/post.d \
            $VAR_LOCATION/remotes/vnm/ovswitch/clean.d \
            $VAR_LOCATION/remotes/vnm/ovswitch_vxlan \
            $VAR_LOCATION/remotes/vnm/ovswitch_vxlan/pre.d \
            $VAR_LOCATION/remotes/vnm/ovswitch_vxlan/post.d \
            $VAR_LOCATION/remotes/vnm/ovswitch_vxlan/clean.d \
            $VAR_LOCATION/remotes/vnm/vcenter \
            $VAR_LOCATION/remotes/vnm/vcenter/pre.d \
            $VAR_LOCATION/remotes/vnm/vcenter/post.d \
            $VAR_LOCATION/remotes/vnm/vcenter/clean.d \
            $VAR_LOCATION/remotes/vnm/elastic \
            $VAR_LOCATION/remotes/vnm/elastic/pre.d \
            $VAR_LOCATION/remotes/vnm/elastic/clean.d \
            $VAR_LOCATION/remotes/vnm/nodeport \
            $VAR_LOCATION/remotes/vnm/hooks/pre \
            $VAR_LOCATION/remotes/vnm/hooks/post \
            $VAR_LOCATION/remotes/vnm/hooks/clean \
            $VAR_LOCATION/remotes/tm/ \
            $VAR_LOCATION/remotes/tm/dummy \
            $VAR_LOCATION/remotes/tm/lib \
            $VAR_LOCATION/remotes/tm/shared \
            $VAR_LOCATION/remotes/tm/fs_lvm \
            $VAR_LOCATION/remotes/tm/fs_lvm_ssh \
            $VAR_LOCATION/remotes/tm/qcow2 \
            $VAR_LOCATION/remotes/tm/ssh \
            $VAR_LOCATION/remotes/tm/ceph \
            $VAR_LOCATION/remotes/tm/dev \
            $VAR_LOCATION/remotes/tm/vcenter \
            $VAR_LOCATION/remotes/tm/iscsi_libvirt \
            $VAR_LOCATION/remotes/hooks \
            $VAR_LOCATION/remotes/hooks/autostart \
            $VAR_LOCATION/remotes/hooks/ft \
            $VAR_LOCATION/remotes/hooks/raft \
            $VAR_LOCATION/remotes/datastore \
            $VAR_LOCATION/remotes/datastore/dummy \
            $VAR_LOCATION/remotes/datastore/fs \
            $VAR_LOCATION/remotes/datastore/ceph \
            $VAR_LOCATION/remotes/datastore/dev \
            $VAR_LOCATION/remotes/datastore/vcenter \
            $VAR_LOCATION/remotes/datastore/iscsi_libvirt \
            $VAR_LOCATION/remotes/datastore/rsync \
            $VAR_LOCATION/remotes/market \
            $VAR_LOCATION/remotes/market/http \
            $VAR_LOCATION/remotes/market/one \
            $VAR_LOCATION/remotes/market/s3 \
            $VAR_LOCATION/remotes/market/common \
            $VAR_LOCATION/remotes/market/linuxcontainers \
            $VAR_LOCATION/remotes/market/turnkeylinux \
            $VAR_LOCATION/remotes/market/dockerhub \
            $VAR_LOCATION/remotes/market/docker_registry \
            $VAR_LOCATION/remotes/auth \
            $VAR_LOCATION/remotes/auth/plain \
            $VAR_LOCATION/remotes/auth/ssh \
            $VAR_LOCATION/remotes/auth/x509 \
            $VAR_LOCATION/remotes/auth/ldap \
            $VAR_LOCATION/remotes/auth/server_x509 \
            $VAR_LOCATION/remotes/auth/server_cipher \
            $VAR_LOCATION/remotes/auth/dummy \
            $VAR_LOCATION/remotes/ipam/dummy \
            $VAR_LOCATION/remotes/ipam/equinix \
            $VAR_LOCATION/remotes/ipam/vultr \
            $VAR_LOCATION/remotes/ipam/aws"

SUNSTONE_DIRS="$SUNSTONE_LOCATION/routes \
               $SUNSTONE_LOCATION/models \
               $SUNSTONE_LOCATION/models/OpenNebulaJSON \
               $SUNSTONE_LOCATION/views \
               $SUNSTONE_LOCATION/services"

SUNSTONE_MINIFIED_DIRS="$SUNSTONE_LOCATION/public \
               $SUNSTONE_LOCATION/public/dist \
               $SUNSTONE_LOCATION/public/dist/console \
               $SUNSTONE_LOCATION/public/css \
               $SUNSTONE_LOCATION/public/css/opensans \
               $SUNSTONE_LOCATION/public/bower_components/fontawesome \
               $SUNSTONE_LOCATION/public/bower_components/fontawesome/web-fonts-with-css \
               $SUNSTONE_LOCATION/public/bower_components/fontawesome/web-fonts-with-css/webfonts \
               $SUNSTONE_LOCATION/public/locale/languages \
               $SUNSTONE_LOCATION/public/images \
               $SUNSTONE_LOCATION/public/images/logos"

FIREEDGE_DIRS="$FIREEDGE_LOCATION"

ONEFLOW_DIRS="$ONEFLOW_LOCATION/lib \
              $ONEFLOW_LOCATION/lib/strategy \
              $ONEFLOW_LOCATION/lib/models"

LIB_OCA_CLIENT_DIRS="$LIB_LOCATION/ruby \
                     $LIB_LOCATION/ruby/opennebula \
                     $LIB_LOCATION/ruby/opennebula/flow"

LIB_CLI_CLIENT_DIRS="$LIB_LOCATION/ruby/cli \
                     $LIB_LOCATION/ruby/cli/one_helper"

CONF_CLI_DIRS="$ETC_LOCATION/cli"

if [ "$CLIENT" = "yes" ] ; then
    MAKE_DIRS="$MAKE_DIRS \
               $LIB_OCA_CLIENT_DIRS $LIB_CLI_CLIENT_DIRS $CONF_CLI_DIRS \
               $ETC_LOCATION"
elif [ "$ONEGATE" = "yes" ] ; then
    MAKE_DIRS="$MAKE_DIRS $LIB_OCA_CLIENT_DIRS"
elif [ "$ONEGATE_PROXY" = "yes" ] ; then
    MAKE_DIRS="$MAKE_DIRS $LIB_OCA_CLIENT_DIRS"
elif [ "$SUNSTONE" = "yes" ] ; then
    if [ "$SUNSTONE_DEV" = "no" ] ; then
        MAKE_DIRS="$MAKE_DIRS $SUNSTONE_DIRS $SUNSTONE_MINIFIED_DIRS $LIB_OCA_CLIENT_DIRS"
    else
        MAKE_DIRS="$MAKE_DIRS $SUNSTONE_DIRS $LIB_OCA_CLIENT_DIRS"
    fi
elif [ "$ONEFLOW" = "yes" ] ; then
    MAKE_DIRS="$MAKE_DIRS $ONEFLOW_DIRS $LIB_OCA_CLIENT_DIRS"
elif [ "$SUNSTONE_DEV" = "no" ] ; then
    MAKE_DIRS="$MAKE_DIRS $SHARE_DIRS $ETC_DIRS $LIB_DIRS $VAR_DIRS \
               $SUNSTONE_DIRS $SUNSTONE_MINIFIED_DIRS $ONEFLOW_DIRS"
else
    MAKE_DIRS="$MAKE_DIRS $SHARE_DIRS $ETC_DIRS $LIB_DIRS $VAR_DIRS \
               $SUNSTONE_DIRS $FIREEDGE_DIRS $ONEFLOW_DIRS"
fi
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# FILE DEFINITION, WHAT IS GOING TO BE INSTALLED AND WHERE
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
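
# Each entry in the lists below follows the convention
#   <NAME_OF_FILE_LIST_VARIABLE>:<destination directory>
# and is split on the ':' separator by the copy/link logic further down in the
# script. A rough sketch of how an entry is consumed (illustrative only, not
# the literal loop used later; variable names here are placeholders):
#
#   for entry in ${INSTALL_FILES[@]}; do
#       SRC_VAR=${entry%%:*}        # e.g. BIN_FILES
#       DST_DIR=${entry##*:}        # e.g. the expanded $BIN_LOCATION
#       eval "FILES=\$$SRC_VAR"
#       for f in $FILES; do cp "$SRC_DIR/$f" "$DST_DIR"; done
#   done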
INSTALL_FILES=(
    BIN_FILES:$BIN_LOCATION
    SBIN_FILES:$SBIN_LOCATION
    INCLUDE_FILES:$INCLUDE_LOCATION
    LIB_FILES:$LIB_LOCATION
    RUBY_LIB_FILES:$LIB_LOCATION/ruby
    RUBY_AUTH_LIB_FILES:$LIB_LOCATION/ruby/opennebula
    RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/opennebula
    RUBY_OPENNEBULA_LIB_FLOW_FILES:$LIB_LOCATION/ruby/opennebula/flow
    MAD_RUBY_LIB_FILES:$LIB_LOCATION/ruby
    MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes
    MAD_SH_LIB_FILES:$LIB_LOCATION/sh
    MAD_SH_LIB_FILES:$VAR_LOCATION/remotes
    REMOTE_FILES:$VAR_LOCATION/remotes
    ONEDB_FILES:$LIB_LOCATION/ruby/onedb
    ONEDB_PATCH_FILES:$LIB_LOCATION/ruby/onedb/patches
    MADS_LIB_FILES:$LIB_LOCATION/mads
    IM_PROBES_FILES:$VAR_LOCATION/remotes/im
    IM_PROBES_LIB_FILES:$VAR_LOCATION/remotes/im/lib
    IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
    IM_PROBES_QEMU_FILES:$VAR_LOCATION/remotes/im/qemu.d
    IM_PROBES_FIRECRACKER_FILES:$VAR_LOCATION/remotes/im/firecracker.d
    IM_PROBES_DUMMY_FILES:$VAR_LOCATION/remotes/im/dummy.d
    IM_PROBES_LXD_FILES:$VAR_LOCATION/remotes/im/lxd.d
    IM_PROBES_LXC_FILES:$VAR_LOCATION/remotes/im/lxc.d
    IM_PROBES_VCENTER_FILES:$VAR_LOCATION/remotes/im/vcenter.d
    IM_PROBES_EC2_FILES:$VAR_LOCATION/remotes/im/ec2.d
    IM_PROBES_AZ_FILES:$VAR_LOCATION/remotes/im/az.d
    IM_PROBES_ONE_FILES:$VAR_LOCATION/remotes/im/one.d
    IM_PROBES_EQUINIX_FILES:$VAR_LOCATION/remotes/im/equinix.d
    IM_PROBES_KVM_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/host/beacon
    IM_PROBES_KVM_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/host/monitor
    IM_PROBES_KVM_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/host/system
    IM_PROBES_KVM_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/vm/monitor
    IM_PROBES_KVM_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/vm/status
    IM_PROBES_KVM_VM_SNAPSHOT_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d/vm/snapshot
    IM_PROBES_ETC_KVM_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/kvm-probes.d
    IM_PROBES_QEMU_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/qemu-probes.d/host/beacon
    IM_PROBES_QEMU_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/qemu-probes.d/host/monitor
    IM_PROBES_QEMU_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/qemu-probes.d/host/system
    IM_PROBES_QEMU_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/qemu-probes.d/vm/monitor
    IM_PROBES_QEMU_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/qemu-probes.d/vm/status
    IM_PROBES_QEMU_VM_SNAPSHOT_FILES:$VAR_LOCATION/remotes/im/qemu-probes.d/vm/snapshot
    IM_PROBES_ETC_QEMU_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/qemu-probes.d
    IM_PROBES_DUMMY_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/dummy-probes.d/host/beacon
    IM_PROBES_DUMMY_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/dummy-probes.d/host/monitor
    IM_PROBES_DUMMY_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/dummy-probes.d/host/system
    IM_PROBES_DUMMY_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/dummy-probes.d/vm/monitor
    IM_PROBES_DUMMY_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/dummy-probes.d/vm/status
    IM_PROBES_LXD_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d/host/beacon
    IM_PROBES_LXD_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d/host/monitor
    IM_PROBES_LXD_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d/host/system
    IM_PROBES_LXD_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d/vm/monitor
    IM_PROBES_LXD_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d/vm/status
    IM_PROBES_LXD_PROBES_FILES:$VAR_LOCATION/remotes/im/lxd-probes.d
    IM_PROBES_ETC_LXD_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/lxd-probes.d
    IM_PROBES_LXC_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/host/beacon
    IM_PROBES_LXC_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/host/monitor
    IM_PROBES_LXC_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/host/system
    IM_PROBES_LXC_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/vm/monitor
    IM_PROBES_LXC_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d/vm/status
    IM_PROBES_LXC_PROBES_FILES:$VAR_LOCATION/remotes/im/lxc-probes.d
    IM_PROBES_ETC_LXC_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/lxc-probes.d
    IM_PROBES_AZ_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/az-probes.d/host/beacon
    IM_PROBES_AZ_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/az-probes.d/host/monitor
    IM_PROBES_AZ_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/az-probes.d/host/system
    IM_PROBES_AZ_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/az-probes.d/vm/monitor
    IM_PROBES_AZ_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/az-probes.d/vm/status
    IM_PROBES_EC2_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/ec2-probes.d/host/beacon
    IM_PROBES_EC2_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/ec2-probes.d/host/monitor
    IM_PROBES_EC2_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/ec2-probes.d/host/system
    IM_PROBES_EC2_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/ec2-probes.d/vm/monitor
    IM_PROBES_EC2_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/ec2-probes.d/vm/status
    IM_PROBES_ONE_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/one-probes.d/host/beacon
    IM_PROBES_ONE_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/one-probes.d/host/monitor
    IM_PROBES_ONE_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/one-probes.d/host/system
    IM_PROBES_ONE_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/one-probes.d/vm/monitor
    IM_PROBES_ONE_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/one-probes.d/vm/status
    IM_PROBES_EQUINIX_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/equinix-probes.d/host/beacon
    IM_PROBES_EQUINIX_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/equinix-probes.d/host/monitor
    IM_PROBES_EQUINIX_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/equinix-probes.d/host/system
    IM_PROBES_EQUINIX_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/equinix-probes.d/vm/monitor
    IM_PROBES_EQUINIX_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/equinix-probes.d/vm/status
    IM_PROBES_VERSION:$VAR_LOCATION/remotes
    IM_PROBES_FIRECRACKER_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/host/beacon
    IM_PROBES_FIRECRACKER_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/host/monitor
    IM_PROBES_FIRECRACKER_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/host/system
    IM_PROBES_FIRECRACKER_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor
    IM_PROBES_FIRECRACKER_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status
    IM_PROBES_ETC_FIRECRACKER_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/firecracker-probes.d
    AUTH_SSH_FILES:$VAR_LOCATION/remotes/auth/ssh
    AUTH_X509_FILES:$VAR_LOCATION/remotes/auth/x509
    AUTH_LDAP_FILES:$VAR_LOCATION/remotes/auth/ldap
    AUTH_SERVER_X509_FILES:$VAR_LOCATION/remotes/auth/server_x509
    AUTH_SERVER_CIPHER_FILES:$VAR_LOCATION/remotes/auth/server_cipher
    AUTH_DUMMY_FILES:$VAR_LOCATION/remotes/auth/dummy
    AUTH_PLAIN_FILES:$VAR_LOCATION/remotes/auth/plain
    VMM_EXEC_LIB_VCENTER_FILES:$LIB_LOCATION/ruby/vcenter_driver
    VMM_EXEC_LIB_NSX_FILES:$LIB_LOCATION/ruby/nsx_driver
    VMM_EXEC_LIB:$VAR_LOCATION/remotes/vmm/lib
    VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm
    VMM_EXEC_KVM_LIB:$VAR_LOCATION/remotes/vmm/kvm
    VMM_EXEC_LXD_SCRIPTS:$VAR_LOCATION/remotes/vmm/lxd
    VMM_EXEC_LXD_LIB:$VAR_LOCATION/remotes/vmm/lxd
    VMM_EXEC_LXC_SCRIPTS:$VAR_LOCATION/remotes/vmm/lxc
    VMM_EXEC_LXC_LIB:$VAR_LOCATION/remotes/vmm/lxc
    VMM_EXEC_FIRECRACKER_SCRIPTS:$VAR_LOCATION/remotes/vmm/firecracker
    VMM_EXEC_FIRECRACKER_LIB:$VAR_LOCATION/remotes/vmm/firecracker
    VMM_EXEC_ETC_KVM_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/kvm
    VMM_EXEC_ETC_LXD_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/lxd
    VMM_EXEC_ETC_LXC_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/lxc
    VMM_EXEC_ETC_LXC_PROFILES:$VAR_LOCATION/remotes/etc/vmm/lxc/profiles
    VMM_EXEC_ETC_FIRECRACKER_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/firecracker
    VMM_EXEC_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/vmm/vcenter
    VMM_EXEC_ETC_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/etc/vmm/vcenter
    VMM_EXEC_EC2_SCRIPTS:$VAR_LOCATION/remotes/vmm/ec2
    VMM_EXEC_AZ_SCRIPTS:$VAR_LOCATION/remotes/vmm/az
    VMM_EXEC_ONE_SCRIPTS:$VAR_LOCATION/remotes/vmm/one
    VMM_EXEC_EQUINIX_SCRIPTS:$VAR_LOCATION/remotes/vmm/equinix
    TM_FILES:$VAR_LOCATION/remotes/tm
    TM_LIB_FILES:$VAR_LOCATION/remotes/tm/lib
    TM_SHARED_FILES:$VAR_LOCATION/remotes/tm/shared
    TM_FS_LVM_FILES:$VAR_LOCATION/remotes/tm/fs_lvm
    TM_FS_LVM_ETC_FILES:$VAR_LOCATION/remotes/etc/tm/fs_lvm/fs_lvm.conf
    TM_FS_LVM_SSH_FILES:$VAR_LOCATION/remotes/tm/fs_lvm_ssh
    TM_QCOW2_FILES:$VAR_LOCATION/remotes/tm/qcow2
    TM_SSH_FILES:$VAR_LOCATION/remotes/tm/ssh
    TM_SSH_ETC_FILES:$VAR_LOCATION/remotes/etc/tm/ssh
    TM_CEPH_FILES:$VAR_LOCATION/remotes/tm/ceph
    TM_DEV_FILES:$VAR_LOCATION/remotes/tm/dev
    TM_ISCSI_FILES:$VAR_LOCATION/remotes/tm/iscsi_libvirt
    TM_DUMMY_FILES:$VAR_LOCATION/remotes/tm/dummy
    TM_VCENTER_FILES:$VAR_LOCATION/remotes/tm/vcenter
    DATASTORE_DRIVER_COMMON_SCRIPTS:$VAR_LOCATION/remotes/datastore/
    DATASTORE_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/datastore/dummy
    DATASTORE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/datastore/fs
    DATASTORE_DRIVER_ETC_FS_SCRIPTS:$VAR_LOCATION/remotes/etc/datastore/fs
    DATASTORE_DRIVER_CEPH_SCRIPTS:$VAR_LOCATION/remotes/datastore/ceph
    DATASTORE_DRIVER_ETC_CEPH_SCRIPTS:$VAR_LOCATION/remotes/etc/datastore/ceph
    DATASTORE_DRIVER_DEV_SCRIPTS:$VAR_LOCATION/remotes/datastore/dev
    DATASTORE_DRIVER_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/datastore/vcenter
    DATASTORE_DRIVER_ISCSI_SCRIPTS:$VAR_LOCATION/remotes/datastore/iscsi_libvirt
    DATASTORE_DRIVER_RSYNC_SCRIPTS:$VAR_LOCATION/remotes/datastore/rsync
    DATASTORE_DRIVER_ETC_SCRIPTS:$VAR_LOCATION/remotes/etc/datastore
    MARKETPLACE_DRIVER_HTTP_SCRIPTS:$VAR_LOCATION/remotes/market/http
    MARKETPLACE_DRIVER_ETC_HTTP_SCRIPTS:$VAR_LOCATION/remotes/etc/market/http
    MARKETPLACE_DRIVER_ONE_SCRIPTS:$VAR_LOCATION/remotes/market/one
    MARKETPLACE_DRIVER_S3_SCRIPTS:$VAR_LOCATION/remotes/market/s3
    MARKETPLACE_DRIVER_COMMON_SCRIPTS:$VAR_LOCATION/remotes/market/common
    MARKETPLACE_DRIVER_LXC_SCRIPTS:$VAR_LOCATION/remotes/market/linuxcontainers
    MARKETPLACE_DRIVER_TK_SCRIPTS:$VAR_LOCATION/remotes/market/turnkeylinux
    MARKETPLACE_DRIVER_DH_SCRIPTS:$VAR_LOCATION/remotes/market/dockerhub
    MARKETPLACE_DRIVER_REGISTRY_SCRIPTS:$VAR_LOCATION/remotes/market/docker_registry
    IPAM_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/ipam/dummy
    IPAM_DRIVER_EQUINIX_SCRIPTS:$VAR_LOCATION/remotes/ipam/equinix
    IPAM_DRIVER_VULTR_SCRIPTS:$VAR_LOCATION/remotes/ipam/vultr
    IPAM_DRIVER_EC2_SCRIPTS:$VAR_LOCATION/remotes/ipam/aws
    NETWORK_FILES:$VAR_LOCATION/remotes/vnm
    NETWORK_HOOKS_PRE_FILES:$VAR_LOCATION/remotes/vnm/hooks/pre
    NETWORK_HOOKS_CLEAN_FILES:$VAR_LOCATION/remotes/vnm/hooks/clean
    NETWORK_ETC_FILES:$VAR_LOCATION/remotes/etc/vnm
    NETWORK_8021Q_FILES:$VAR_LOCATION/remotes/vnm/802.1Q
    NETWORK_VXLAN_FILES:$VAR_LOCATION/remotes/vnm/vxlan
    NETWORK_DUMMY_FILES:$VAR_LOCATION/remotes/vnm/dummy
    NETWORK_BRIDGE_FILES:$VAR_LOCATION/remotes/vnm/bridge
    NETWORK_EBTABLES_FILES:$VAR_LOCATION/remotes/vnm/ebtables
    NETWORK_FW_FILES:$VAR_LOCATION/remotes/vnm/fw
    NETWORK_OVSWITCH_FILES:$VAR_LOCATION/remotes/vnm/ovswitch
    NETWORK_OVSWITCH_VXLAN_FILES:$VAR_LOCATION/remotes/vnm/ovswitch_vxlan
    NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter
    NETWORK_ELASTIC_FILES:$VAR_LOCATION/remotes/vnm/elastic
    NETWORK_NODEPORT_FILES:$VAR_LOCATION/remotes/vnm/nodeport
    EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
    EXAMPLE_HOST_HOOKS_SHARE_FILES:$SHARE_LOCATION/examples/host_hooks
    EXAMPLE_EXTERNAL_SCHED_FILES:$SHARE_LOCATION/examples/external_scheduler
    LXD_NETWORK_HOOKS:$SHARE_LOCATION/examples/network_hooks
    WEBSOCKIFY_SHARE_RUN_FILES:$SHARE_LOCATION/websockify
    WEBSOCKIFY_SHARE_MODULE_FILES:$SHARE_LOCATION/websockify/websockify
    INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION
    ONETOKEN_SHARE_FILE:$SHARE_LOCATION
    FOLLOWER_CLEANUP_SHARE_FILE:$SHARE_LOCATION
    PRE_CLEANUP_SHARE_FILE:$SHARE_LOCATION
    HOOK_AUTOSTART_FILES:$VAR_LOCATION/remotes/hooks/autostart
    HOOK_FT_FILES:$VAR_LOCATION/remotes/hooks/ft
    HOOK_RAFT_FILES:$VAR_LOCATION/remotes/hooks/raft
    COMMON_CLOUD_LIB_FILES:$LIB_LOCATION/ruby/cloud
    CLOUD_AUTH_LIB_FILES:$LIB_LOCATION/ruby/cloud/CloudAuth
    MAN_FILES:$MAN_LOCATION
    DOCS_FILES:$DOCS_LOCATION
    CLI_LIB_FILES:$LIB_LOCATION/ruby/cli
    ONE_CLI_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper
    VENDOR_DIRS:$LIB_LOCATION/ruby/vendors
    START_SCRIPT_SHARE_FILES:$SHARE_LOCATION/start-scripts
    LIBVIRT_RNG_SHARE_MODULE_FILES:$SHARE_LOCATION/schemas/libvirt
    XSD_FILES:$SHARE_LOCATION/schemas/xsd
    SSH_SH_LIB_FILES:$LIB_LOCATION/sh
    SSH_SH_OVERRIDE_LIB_FILES:$LIB_LOCATION/sh/override
    SSH_SHARE_FILES:$SHARE_LOCATION/ssh
    CONTEXT_SHARE:$SHARE_LOCATION/context
    DOCKERFILE_TEMPLATE:$SHARE_LOCATION/dockerhub
    DOCKERFILES_TEMPLATES:$SHARE_LOCATION/dockerhub/dockerfiles
)

INSTALL_CLIENT_FILES=(
    COMMON_CLOUD_CLIENT_LIB_FILES:$LIB_LOCATION/ruby/cloud
    CLI_BIN_FILES:$BIN_LOCATION
    CLI_LIB_FILES:$LIB_LOCATION/ruby/cli
    ONE_CLI_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper
    CLI_CONF_FILES:$ETC_LOCATION/cli
    OCA_LIB_FILES:$LIB_LOCATION/ruby
    RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/opennebula
    RUBY_OPENNEBULA_LIB_FLOW_FILES:$LIB_LOCATION/ruby/opennebula/flow
    RUBY_AUTH_LIB_FILES:$LIB_LOCATION/ruby/opennebula
)
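
# INSTALL_CLIENT_FILES is the reduced set used for a client-only installation
# (the -c option): the CLI binaries, their Ruby helpers and the OCA bindings,
# without daemons, drivers or remotes.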

INSTALL_ONEPROVISION_FILES=(
    ONEPROVISION_BIN_FILES:$BIN_LOCATION
    ONEPROVISION_ONE_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper
    ONEPROVISION_CONF_FILES:$ETC_LOCATION/cli
    ONEPROVISION_ANSIBLE_FILES:$SHARE_LOCATION/oneprovision
    ONEPROVISION_TEMPLATES_FILES:$SHARE_LOCATION/oneprovision
    ONEPROVISION_LIB_FILES:$LIB_LOCATION/oneprovision/lib
    ONEPROVISION_LIB_API_FILES:$LIB_LOCATION/oneprovision/provider_apis
    ONEPROVISION_LIB_API_VULTR_FILES:$LIB_LOCATION/oneprovision/provider_apis/vultr
    ONEPROVISION_LIB_TF_FILES:$LIB_LOCATION/oneprovision/lib/terraform
    ONEPROVISION_LIB_PROVIDERS_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers
    ONEPROVISION_LIB_AWS_ERB_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers/templates/aws
    ONEPROVISION_LIB_GOOGLE_ERB_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers/templates/google
    ONEPROVISION_LIB_DIGITALOCEAN_ERB_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers/templates/digitalocean
    ONEPROVISION_LIB_EQUINIX_ERB_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers/templates/equinix
    ONEPROVISION_LIB_VULTR_METAL_ERB_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers/templates/vultr_metal
    ONEPROVISION_LIB_VULTR_VIRTUAL_ERB_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers/templates/vultr_virtual
    ONEPROVISION_LIB_PROVISION_FILES:$LIB_LOCATION/oneprovision/lib/provision
    ONEPROVISION_LIB_RESOURCES_FILES:$LIB_LOCATION/oneprovision/lib/provision/resources
    ONEPROVISION_LIB_PHYSICAL_R_FILES:$LIB_LOCATION/oneprovision/lib/provision/resources/physical
    ONEPROVISION_LIB_VIRTUAL_R_FILES:$LIB_LOCATION/oneprovision/lib/provision/resources/virtual
    ONEPROVISION_LIB_PROVIDER_FILES:$LIB_LOCATION/oneprovision/lib/provider
)

INSTALL_ONECFG_FILES=(
    ONECFG_BIN_FILES:$BIN_LOCATION
    ONECFG_LIB_FILES:$LIB_LOCATION/onecfg/lib
    ONECFG_LIB_COMMON_FILES:$LIB_LOCATION/onecfg/lib/common
    ONECFG_LIB_COMMON_HELPERS_FILES:$LIB_LOCATION/onecfg/lib/common/helpers
    ONECFG_LIB_COMMON_LOGGER_FILES:$LIB_LOCATION/onecfg/lib/common/logger
    ONECFG_LIB_CONFIG_FILES:$LIB_LOCATION/onecfg/lib/config
    ONECFG_LIB_CONFIG_TYPE_FILES:$LIB_LOCATION/onecfg/lib/config/type
    ONECFG_LIB_CONFIG_TYPE_AUGEAS_FILES:$LIB_LOCATION/onecfg/lib/config/type/augeas
    ONECFG_LIB_CONFIG_TYPE_YAML_FILES:$LIB_LOCATION/onecfg/lib/config/type/yaml
    ONECFG_LIB_PATCH_FILES:$LIB_LOCATION/onecfg/lib/patch
    ONECFG_SHARE_ETC_FILES:$SHARE_LOCATION/onecfg/etc
)

INSTALL_SUNSTONE_RUBY_FILES=(
    RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/opennebula
    RUBY_OPENNEBULA_LIB_FLOW_FILES:$LIB_LOCATION/ruby/opennebula/flow
    OCA_LIB_FILES:$LIB_LOCATION/ruby
)

INSTALL_SUNSTONE_FILES=(
    SUNSTONE_FILES:$SUNSTONE_LOCATION
    SUNSTONE_BIN_FILES:$BIN_LOCATION
    SUNSTONE_MODELS_FILES:$SUNSTONE_LOCATION/models
    SUNSTONE_MODELS_JSON_FILES:$SUNSTONE_LOCATION/models/OpenNebulaJSON
    SUNSTONE_VIEWS_FILES:$SUNSTONE_LOCATION/views
    SUNSTONE_ROUTES_FILES:$SUNSTONE_LOCATION/routes
    SUNSTONE_SERVICES_FILES:$SUNSTONE_LOCATION/services
)

INSTALL_SUNSTONE_PUBLIC_MINIFIED_FILES=(
    SUNSTONE_PUBLIC_JS_FILES:$SUNSTONE_LOCATION/public/dist
    SUNSTONE_PUBLIC_JS_CONSOLE_FILES:$SUNSTONE_LOCATION/public/dist/console
    SUNSTONE_PUBLIC_FONT_AWSOME:$SUNSTONE_LOCATION/public/bower_components/fontawesome/web-fonts-with-css/webfonts
    SUNSTONE_PUBLIC_CSS_FILES:$SUNSTONE_LOCATION/public/css
    SUNSTONE_PUBLIC_IMAGES_FILES:$SUNSTONE_LOCATION/public/images
    SUNSTONE_PUBLIC_LOGOS_FILES:$SUNSTONE_LOCATION/public/images/logos
    SUNSTONE_PUBLIC_LOCALE_CA:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_CS_CZ:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_DE:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_DA:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_EN_US:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_ES_ES:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_FA_IR:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_FR_FR:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_IT_IT:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_JA:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_LT_LT:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_NL_NL:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_PL:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_PT_PT:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_PT_BR:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_RU_RU:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_SK_SK:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_ZH_CN:$SUNSTONE_LOCATION/public/locale/languages
    SUNSTONE_PUBLIC_LOCALE_TR_TR:$SUNSTONE_LOCATION/public/locale/languages
)

INSTALL_SUNSTONE_PUBLIC_DEV_DIR=(
    SUNSTONE_PUBLIC_DEV_DIR:$SUNSTONE_LOCATION
)

INSTALL_SUNSTONE_ETC_FILES=(
    SUNSTONE_ETC_FILES:$ETC_LOCATION
    SUNSTONE_ETC_VIEW_KVM:$ETC_LOCATION/sunstone-views/kvm
    SUNSTONE_ETC_VIEW_VCENTER:$ETC_LOCATION/sunstone-views/vcenter
    SUNSTONE_ETC_VIEW_MIXED:$ETC_LOCATION/sunstone-views/mixed
)

INSTALL_FIREEDGE_FILES=(
    FIREEDGE_MINIFIED_FILES:$FIREEDGE_LOCATION
    PROVISION_ETC:$ETC_LOCATION/fireedge/provision
    PROVISION_ETC_PROVIDERS:$ETC_LOCATION/fireedge/provision/providers.d
    PROVISION_ETC_PROVIDERS_EXTRA:$ETC_LOCATION/fireedge/provision/providers.d-extra
    SUNSTONE_ETC_VIEWS:$ETC_LOCATION/fireedge/sunstone
    SUNSTONE_ETC_VIEWS_ADMIN:$ETC_LOCATION/fireedge/sunstone/admin
    SUNSTONE_ETC_VIEWS_USER:$ETC_LOCATION/fireedge/sunstone/user
    FIREEDGE_BIN_FILES:$BIN_LOCATION
)

INSTALL_FIREEDGE_ETC_FILES=(
    FIREEDGE_ETC_FILES:$ETC_LOCATION
    FIREEDGE_PROVISION_ETC:$ETC_LOCATION/fireedge/provision
    FIREEDGE_PROVISION_ETC_PROVIDERS:$ETC_LOCATION/fireedge/provision/providers.d
    FIREEDGE_PROVISION_ETC_PROVIDERS_EXTRA:$ETC_LOCATION/fireedge/provision/providers.d-extra
    FIREEDGE_SUNSTONE_ETC:$ETC_LOCATION/fireedge/sunstone
    FIREEDGE_SUNSTONE_ETC_VIEW_ADMIN:$ETC_LOCATION/fireedge/sunstone/admin
    FIREEDGE_SUNSTONE_ETC_VIEW_USER:$ETC_LOCATION/fireedge/sunstone/user
    FIREEDGE_SUNSTONE_ETC_VIEW_CLOUD:$ETC_LOCATION/fireedge/sunstone/cloud
    FIREEDGE_SUNSTONE_ETC_VIEW_GROUPADMIN:$ETC_LOCATION/fireedge/sunstone/groupadmin
)

INSTALL_FIREEDGE_DEV_DIRS=(
    FIREEDGE_DEV_FILES:$FIREEDGE_LOCATION
)
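
# The minified and dev directory groups above mirror the -p/-P options from the
# usage text: a regular install ships the pre-built (minified) Sunstone and
# FireEdge assets, while the dev variants are intended for development setups
# (typically combined with -l symlinks).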

INSTALL_ONEGATE_FILES=(
    ONEGATE_FILES:$ONEGATE_LOCATION
    ONEGATE_BIN_FILES:$BIN_LOCATION
)

INSTALL_ONEGATE_ETC_FILES=(
    ONEGATE_ETC_FILES:$ETC_LOCATION
)

INSTALL_ONEGATE_PROXY_FILES=(
    ONEGATE_PROXY_FILES:$ONEGATE_PROXY_LOCATION
    ONEGATE_PROXY_BIN_FILES:$BIN_LOCATION
)

INSTALL_ONEGATE_PROXY_ETC_FILES=(
    ONEGATE_PROXY_REMOTES_ETC_FILES:$VAR_LOCATION/remotes/etc
)

INSTALL_ONEFLOW_FILES=(
    ONEFLOW_FILES:$ONEFLOW_LOCATION
    ONEFLOW_BIN_FILES:$BIN_LOCATION
    ONEFLOW_LIB_FILES:$ONEFLOW_LOCATION/lib
    ONEFLOW_LIB_STRATEGY_FILES:$ONEFLOW_LOCATION/lib/strategy
    ONEFLOW_LIB_MODELS_FILES:$ONEFLOW_LOCATION/lib/models
)

INSTALL_ONEFLOW_ETC_FILES=(
    ONEFLOW_ETC_FILES:$ETC_LOCATION
)

INSTALL_ONEHEM_FILES=(
    ONEHEM_FILES:$ONEHEM_LOCATION
    ONEHEM_BIN_FILES:$BIN_LOCATION
)

INSTALL_ONEHEM_ETC_FILES=(
    ONEHEM_ETC_FILES:$ETC_LOCATION
)

INSTALL_DOCKER_MACHINE_FILES=(
    DOCKER_MACHINE_BIN_FILES:$BIN_LOCATION
)

INSTALL_ETC_FILES=(
    ETC_FILES:$ETC_LOCATION
    ETC_FILES:$SHARE_LOCATION/conf
    EC2_ETC_FILES:$ETC_LOCATION
    VCENTER_ETC_FILES:$ETC_LOCATION
    AZ_ETC_FILES:$ETC_LOCATION
    VMM_EXEC_ETC_FILES:$ETC_LOCATION/vmm_exec
    HM_ETC_FILES:$ETC_LOCATION/hm
    AUTH_ETC_FILES:$ETC_LOCATION/auth
    CLI_CONF_FILES:$ETC_LOCATION/cli
)
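
# The *_ETC_FILES groups are kept separate from the code file groups so that
# configuration can be handled on its own; this is how the -k (keep
# configuration) option can preserve an existing setup while the rest of the
# files are refreshed on upgrade.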

#-------------------------------------------------------------------------------
# Binary files, to be installed under $BIN_LOCATION
#-------------------------------------------------------------------------------
BIN_FILES="src/nebula/oned \
           src/scheduler/src/sched/mm_sched \
           src/cli/onevm \
           src/cli/oneacct \
           src/cli/oneshowback \
           src/cli/onehost \
           src/cli/onevnet \
           src/cli/oneuser \
           src/cli/oneimage \
           src/cli/onegroup \
           src/cli/onetemplate \
           src/cli/oneacl \
           src/cli/onedatastore \
           src/cli/onecluster \
           src/cli/onezone \
           src/cli/oneflow \
           src/cli/oneflow-template \
           src/cli/onesecgroup \
           src/cli/onevmgroup \
           src/cli/onevdc \
           src/cli/onevrouter \
           src/cli/onemarket \
           src/cli/onemarketapp \
           src/cli/onevcenter \
           src/cli/onevntemplate \
           src/cli/onehook \
           src/cli/onebackupjob \
           src/cli/onelog \
           src/cli/oneirb \
           src/onedb/onedb \
           share/scripts/qemu-kvm-one-gen \
           share/scripts/one"
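
# BIN_FILES (like every *_FILES variable below) is a plain space-separated list
# of source paths relative to the repository root; the destination directory
# comes from the matching INSTALL_FILES entry above, so only the file name is
# preserved on installation.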

#-------------------------------------------------------------------------------
# Binary files, to be installed under $SBIN_LOCATION
#-------------------------------------------------------------------------------
SBIN_FILES="src/vmm_mad/remotes/lib/firecracker/install-firecracker"

#-------------------------------------------------------------------------------
# C/C++ OpenNebula API Library & Development files
# Include files, to be installed under $INCLUDE_LOCATION
# Library files, to be installed under $LIB_LOCATION
#-------------------------------------------------------------------------------

INCLUDE_FILES=""
LIB_FILES=""

#-------------------------------------------------------------------------------
# Ruby library files, to be installed under $LIB_LOCATION/ruby
#-------------------------------------------------------------------------------

RUBY_LIB_FILES="src/mad/ruby/ActionManager.rb \
                src/mad/ruby/CommandManager.rb \
                src/mad/ruby/OpenNebulaDriver.rb \
                src/mad/ruby/VirtualMachineDriver.rb \
                src/mad/ruby/PublicCloudDriver.rb \
                src/mad/ruby/HostSyncManager.rb \
                src/mad/ruby/DriverExecHelper.rb \
                src/mad/ruby/ssh_stream.rb \
                src/vnm_mad/one_vnm.rb \
                src/oca/ruby/opennebula.rb \
                src/sunstone/OpenNebulaAddons.rb \
                src/vmm_mad/remotes/vcenter/vcenter_driver.rb \
                src/vmm_mad/remotes/nsx/nsx_driver.rb \
                src/vmm_mad/remotes/az/az_driver.rb \
                src/vmm_mad/remotes/ec2/ec2_driver.rb \
                src/vmm_mad/remotes/one/opennebula_driver.rb \
                src/vmm_mad/remotes/equinix/equinix_driver.rb \
                src/vnm_mad/remotes/elastic/aws_vnm.rb \
                src/vnm_mad/remotes/elastic/equinix_vnm.rb \
                src/vnm_mad/remotes/elastic/equinix.rb \
                src/vnm_mad/remotes/elastic/vultr_vnm.rb"

#-------------------------------------------------------------------------------
# Ruby auth library files, to be installed under $LIB_LOCATION/ruby/opennebula
#-------------------------------------------------------------------------------
RUBY_AUTH_LIB_FILES="src/authm_mad/remotes/ssh/ssh_auth.rb \
                     src/authm_mad/remotes/server_x509/server_x509_auth.rb \
                     src/authm_mad/remotes/server_cipher/server_cipher_auth.rb \
                     src/authm_mad/remotes/ldap/ldap_auth.rb \
                     src/authm_mad/remotes/x509/x509_auth.rb"

#-----------------------------------------------------------------------------
# MAD Script library files, to be installed under $LIB_LOCATION/<script lang>
# and remotes directory
#-----------------------------------------------------------------------------

REMOTE_FILES="src/vmm_mad/remotes/kvm/vgpu"

MAD_SH_LIB_FILES="src/mad/sh/scripts_common.sh \
                  src/mad/sh/create_container_image.sh \
                  src/mad/sh/create_docker_image.sh"

MAD_RUBY_LIB_FILES="src/mad/ruby/scripts_common.rb"

#-------------------------------------------------------------------------------
# Driver executable files, to be installed under $LIB_LOCATION/mads
#-------------------------------------------------------------------------------
MADS_LIB_FILES="src/mad/sh/madcommon.sh \
                src/vmm_mad/exec/one_vmm_exec.rb \
                src/vmm_mad/exec/one_vmm_exec \
                src/vmm_mad/exec/one_vmm_sh \
                src/vmm_mad/exec/one_vmm_ssh \
                src/vmm_mad/dummy/one_vmm_dummy.rb \
                src/vmm_mad/dummy/one_vmm_dummy \
                src/im_mad/im_exec/one_im_exec.rb \
                src/im_mad/im_exec/one_im_exec \
                src/im_mad/im_exec/one_im_ssh \
                src/im_mad/im_exec/one_im_sh \
                src/monitor/src/monitor/onemonitord \
                src/tm_mad/one_tm \
                src/tm_mad/one_tm.rb \
                src/hm_mad/one_hm.rb \
                src/hm_mad/one_hm \
                src/authm_mad/one_auth_mad.rb \
                src/authm_mad/one_auth_mad \
                src/datastore_mad/one_datastore.rb \
                src/datastore_mad/one_datastore_exec.rb \
                src/datastore_mad/one_datastore \
                src/market_mad/one_market.rb \
                src/market_mad/one_market \
                src/ipamm_mad/one_ipam \
                src/ipamm_mad/one_ipam.rb"

#-------------------------------------------------------------------------------
# Common library files for VMM drivers
#-------------------------------------------------------------------------------
VMM_EXEC_LIB="src/vmm_mad/remotes/lib/command.rb \
              src/vmm_mad/remotes/lib/xmlparser.rb \
              src/vmm_mad/remotes/lib/opennebula_vm.rb"

#-------------------------------------------------------------------------------
# VMM Lib vcenter files, used by the vCenter Driver to be installed in
# $REMOTES_LOCATION/vmm/lib/vcenter
#-------------------------------------------------------------------------------
VMM_EXEC_LIB_VCENTER_FILES="src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/rest_client.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/host.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/network.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/vm_folder.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/vmm_importer.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_device/vm_device.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_device/vm_disk.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_device/vm_nic.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_helper/vm_helper.rb \
                    src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_monitor/vm_monitor.rb"

#-------------------------------------------------------------------------------
# VMM Lib nsx files, used by the NSX Driver to be installed in
# $REMOTES_LOCATION/vmm/lib/nsx
#-------------------------------------------------------------------------------
VMM_EXEC_LIB_NSX_FILES="src/vmm_mad/remotes/lib/nsx_driver/logical_switch.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsx_client.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxt_client.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxv_client.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsx_component.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsx_constants.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsx_error.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/opaque_network.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/transport_zone.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxt_tz.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxv_tz.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/virtual_wire.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/distributed_firewall.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxt_dfw.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxv_dfw.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/logical_port.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxt_logical_port.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxv_logical_port.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsx_rule.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxt_rule.rb \
                    src/vmm_mad/remotes/lib/nsx_driver/nsxv_rule.rb"

#-------------------------------------------------------------------------------
# VMM SH Driver LXD scripts, to be installed under $REMOTES_LOCATION/vmm/lxd
#-------------------------------------------------------------------------------
VMM_EXEC_LXD_SCRIPTS="src/vmm_mad/remotes/lxd/cancel \
src/vmm_mad/remotes/lxd/deploy \
src/vmm_mad/remotes/lxd/migrate \
src/vmm_mad/remotes/lxd/migrate_local \
src/vmm_mad/remotes/lxd/restore \
src/vmm_mad/remotes/lxd/reboot \
src/vmm_mad/remotes/lxd/reset \
src/vmm_mad/remotes/lxd/save \
src/vmm_mad/remotes/lxd/attach_disk \
src/vmm_mad/remotes/lxd/detach_disk \
src/vmm_mad/remotes/lxd/attach_nic \
src/vmm_mad/remotes/lxd/detach_nic \
src/vmm_mad/remotes/lxd/snapshot_create \
src/vmm_mad/remotes/lxd/snapshot_revert \
src/vmm_mad/remotes/lxd/snapshot_delete \
src/vmm_mad/remotes/lxd/shutdown \
src/vmm_mad/remotes/lxd/reconfigure \
src/vmm_mad/remotes/lxd/prereconfigure \
src/vmm_mad/remotes/lxd/resize_disk"
VMM_EXEC_LXD_LIB="src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb \
src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb \
src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb \
src/vmm_mad/remotes/lib/lxd/mapper/raw.rb \
src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb \
src/vmm_mad/remotes/lib/lxd/client.rb \
src/vmm_mad/remotes/lib/lxd/command.rb \
src/vmm_mad/remotes/lib/lxd/container.rb"
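
#-------------------------------------------------------------------------------
# A minimal sketch of how a list such as $VMM_EXEC_LXD_LIB could be placed in a
# target directory while honoring a "link instead of copy" development mode.
# The helper name, the LINK variable and the destination below are illustrative
# assumptions, not the installer's real implementation:
#
#   place_file_list "$REMOTES_LOCATION/vmm/lib/lxd" $VMM_EXEC_LXD_LIB
#-------------------------------------------------------------------------------
place_file_list() {
    # $1: destination directory, remaining arguments: source files
    local dst="$1" f
    shift
    mkdir -p "$dst"
    for f in "$@"; do
        if [ "$LINK" = "yes" ]; then
            # Symlink back to the checkout, useful while developing
            ln -sf "$PWD/$f" "$dst/$(basename "$f")"
        else
            cp -f "$f" "$dst"
        fi
    done
}
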
#-------------------------------------------------------------------------------
# VMM SH Driver LXC scripts, to be installed under $REMOTES_LOCATION/vmm/lxc
#-------------------------------------------------------------------------------
VMM_EXEC_LXC_SCRIPTS="src/vmm_mad/remotes/lxc/attach_disk \
src/vmm_mad/remotes/lxc/deploy \
src/vmm_mad/remotes/lxc/prereconfigure \
src/vmm_mad/remotes/lxc/reset \
src/vmm_mad/remotes/lxc/restore \
src/vmm_mad/remotes/lxc/snapshot_create \
src/vmm_mad/remotes/lxc/attach_nic \
src/vmm_mad/remotes/lxc/detach_disk \
src/vmm_mad/remotes/lxc/migrate \
src/vmm_mad/remotes/lxc/reboot \
src/vmm_mad/remotes/lxc/resize \
src/vmm_mad/remotes/lxc/save \
src/vmm_mad/remotes/lxc/snapshot_delete \
src/vmm_mad/remotes/lxc/cancel \
src/vmm_mad/remotes/lxc/detach_nic \
src/vmm_mad/remotes/lxc/migrate_local \
src/vmm_mad/remotes/lxc/reconfigure \
src/vmm_mad/remotes/lxc/resize_disk \
src/vmm_mad/remotes/lxc/shutdown \
src/vmm_mad/remotes/lxc/snapshot_revert"
VMM_EXEC_LXC_LIB="src/vmm_mad/remotes/lib/lxc/opennebula_vm.rb \
    src/vmm_mad/remotes/lib/lxc/client.rb \
    src/vmm_mad/remotes/lib/lxc/command.rb \
    src/vmm_mad/remotes/lib/lxc/container.rb \
    src/vmm_mad/remotes/lib/lxc/storage/mappers/qcow2.rb \
    src/vmm_mad/remotes/lib/lxc/storage/mappers/raw.rb \
    src/vmm_mad/remotes/lib/lxc/storage/mappers/rbd.rb \
    src/vmm_mad/remotes/lib/lxc/storage/mappers/device.rb \
    src/vmm_mad/remotes/lib/lxc/storage/storageutils.rb"

#-------------------------------------------------------------------------------
# VMM SH Driver Firecracker scripts, to be installed under $REMOTES_LOCATION/vmm/firecracker
#-------------------------------------------------------------------------------
VMM_EXEC_FIRECRACKER_SCRIPTS="src/vmm_mad/remotes/firecracker/deploy \
src/vmm_mad/remotes/firecracker/shutdown \
src/vmm_mad/remotes/firecracker/cancel \
src/vmm_mad/remotes/firecracker/migrate \
src/vmm_mad/remotes/firecracker/migrate_local \
src/vmm_mad/remotes/firecracker/restore \
src/vmm_mad/remotes/firecracker/reboot \
src/vmm_mad/remotes/firecracker/reset \
src/vmm_mad/remotes/firecracker/save \
src/vmm_mad/remotes/firecracker/attach_disk \
src/vmm_mad/remotes/firecracker/detach_disk \
src/vmm_mad/remotes/firecracker/attach_nic \
src/vmm_mad/remotes/firecracker/detach_nic \
src/vmm_mad/remotes/firecracker/snapshot_create \
src/vmm_mad/remotes/firecracker/snapshot_revert \
src/vmm_mad/remotes/firecracker/snapshot_delete \
src/vmm_mad/remotes/firecracker/reconfigure \
src/vmm_mad/remotes/firecracker/prereconfigure \
src/vmm_mad/remotes/firecracker/resize \
src/vmm_mad/remotes/firecracker/resize_disk"

VMM_EXEC_FIRECRACKER_LIB="src/vmm_mad/remotes/lib/firecracker/opennebula_vm.rb \
    src/vmm_mad/remotes/lib/firecracker/client.rb \
    src/vmm_mad/remotes/lib/firecracker/microvm.rb \
    src/vmm_mad/remotes/lib/firecracker/map_context \
    src/vmm_mad/remotes/lib/firecracker/command.rb"

#-------------------------------------------------------------------------------
# VMM configuration LXD scripts, to be installed under $REMOTES_LOCATION/etc/vmm/lxd
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_LXD_SCRIPTS="src/vmm_mad/remotes/lxd/lxdrc"

#-------------------------------------------------------------------------------
# VMM configuration LXC scripts, to be installed under $REMOTES_LOCATION/etc/vmm/lxc
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_LXC_SCRIPTS="src/vmm_mad/remotes/lxc/lxcrc"

#-------------------------------------------------------------------------------
# LXC profiles, to be installed under $REMOTES_LOCATION/etc/vmm/lxc/profiles
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_LXC_PROFILES="src/vmm_mad/remotes/lxc/profile_privileged"

#-------------------------------------------------------------------------------
# VMM configuration Firecracker scripts, to be installed under $REMOTES_LOCATION/etc/vmm/firecracker
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_FIRECRACKER_SCRIPTS="src/vmm_mad/remotes/firecracker/firecrackerrc"

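#-------------------------------------------------------------------------------
# The *_ETC_* lists above hold configuration files (kvmrc, lxcrc, lxdrc,
# firecrackerrc, ...). A minimal sketch of copying one of them while honoring a
# keep-configuration mode; the helper name and the KEEP variable are
# illustrative assumptions, not the installer's real implementation:
#-------------------------------------------------------------------------------
install_etc_file() {
    # $1: source config file, $2: destination directory
    local src="$1" dst="$2/$(basename "$1")"
    if [ "$KEEP" = "yes" ] && [ -e "$dst" ]; then
        # Leave the administrator's existing configuration untouched
        return 0
    fi
    mkdir -p "$2"
    cp -f "$src" "$dst"
}
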
#-------------------------------------------------------------------------------
# VMM SH Driver KVM scripts, to be installed under $REMOTES_LOCATION/vmm/kvm
#-------------------------------------------------------------------------------
VMM_EXEC_KVM_SCRIPTS="src/vmm_mad/remotes/kvm/cancel \
    src/vmm_mad/remotes/kvm/deploy \
    src/vmm_mad/remotes/kvm/migrate \
    src/vmm_mad/remotes/kvm/migrate_local \
    src/vmm_mad/remotes/kvm/restore \
    src/vmm_mad/remotes/kvm/restore.ceph \
    src/vmm_mad/remotes/kvm/reboot \
    src/vmm_mad/remotes/kvm/reset \
    src/vmm_mad/remotes/kvm/save \
    src/vmm_mad/remotes/kvm/save.ceph \
    src/vmm_mad/remotes/kvm/attach_disk \
    src/vmm_mad/remotes/kvm/detach_disk \
    src/vmm_mad/remotes/kvm/attach_nic \
    src/vmm_mad/remotes/kvm/detach_nic \
    src/vmm_mad/remotes/kvm/snapshot_create \
    src/vmm_mad/remotes/kvm/snapshot_revert \
    src/vmm_mad/remotes/kvm/snapshot_delete \
    src/vmm_mad/remotes/kvm/shutdown \
    src/vmm_mad/remotes/kvm/reconfigure \
    src/vmm_mad/remotes/kvm/prereconfigure \
    src/vmm_mad/remotes/kvm/resize \
    src/vmm_mad/remotes/kvm/resize_disk"

VMM_EXEC_KVM_LIB="src/vmm_mad/remotes/lib/kvm/opennebula_vm.rb"

#-------------------------------------------------------------------------------
# VMM configuration KVM scripts, to be installed under $REMOTES_LOCATION/etc/vmm/kvm
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_KVM_SCRIPTS="src/vmm_mad/remotes/kvm/kvmrc"

#-------------------------------------------------------------------------------
# VMM Driver vCenter scripts, installed under $REMOTES_LOCATION/vmm/vcenter
#-------------------------------------------------------------------------------
VMM_EXEC_VCENTER_SCRIPTS="src/vmm_mad/remotes/vcenter/cancel \
    src/vmm_mad/remotes/vcenter/attach_disk \
    src/vmm_mad/remotes/vcenter/detach_disk \
    src/vmm_mad/remotes/vcenter/attach_nic \
    src/vmm_mad/remotes/vcenter/detach_nic \
    src/vmm_mad/remotes/vcenter/snapshot_create \
    src/vmm_mad/remotes/vcenter/snapshot_revert \
    src/vmm_mad/remotes/vcenter/snapshot_delete \
    src/vmm_mad/remotes/vcenter/deploy \
    src/vmm_mad/remotes/vcenter/migrate \
    src/vmm_mad/remotes/vcenter/restore \
    src/vmm_mad/remotes/vcenter/reboot \
    src/vmm_mad/remotes/vcenter/reset \
    src/vmm_mad/remotes/vcenter/save \
    src/vmm_mad/remotes/vcenter/resize_disk \
    src/vmm_mad/remotes/vcenter/resize \
    src/vmm_mad/remotes/vcenter/shutdown \
    src/vmm_mad/remotes/vcenter/reconfigure \
    src/vmm_mad/remotes/vcenter/preconfigure \
    src/vmm_mad/remotes/vcenter/prereconfigure"

#-------------------------------------------------------------------------------
# VMM configuration VCENTER scripts, to be installed under $REMOTES_LOCATION/etc/vmm/vcenter
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_VCENTER_SCRIPTS="src/vmm_mad/remotes/vcenter/vcenterrc"

#------------------------------------------------------------------------------
# VMM Driver EC2 scripts, to be installed under $REMOTES_LOCATION/vmm/ec2
#------------------------------------------------------------------------------
VMM_EXEC_EC2_SCRIPTS="src/vmm_mad/remotes/ec2/cancel \
src/vmm_mad/remotes/ec2/attach_disk \
src/vmm_mad/remotes/ec2/detach_disk \
src/vmm_mad/remotes/ec2/attach_nic \
src/vmm_mad/remotes/ec2/detach_nic \
src/vmm_mad/remotes/ec2/snapshot_create \
src/vmm_mad/remotes/ec2/snapshot_revert \
src/vmm_mad/remotes/ec2/snapshot_delete \
src/vmm_mad/remotes/ec2/deploy \
src/vmm_mad/remotes/ec2/migrate \
src/vmm_mad/remotes/ec2/restore \
src/vmm_mad/remotes/ec2/reboot \
src/vmm_mad/remotes/ec2/reset \
src/vmm_mad/remotes/ec2/save \
src/vmm_mad/remotes/ec2/shutdown \
src/vmm_mad/remotes/ec2/reconfigure \
src/vmm_mad/remotes/ec2/prereconfigure \
src/vmm_mad/remotes/ec2/resize_disk"
#------------------------------------------------------------------------------
# VMM Driver Azure scripts, to be installed under $REMOTES_LOCATION/vmm/az
#------------------------------------------------------------------------------
VMM_EXEC_AZ_SCRIPTS="src/vmm_mad/remotes/az/cancel \
src/vmm_mad/remotes/az/attach_disk \
src/vmm_mad/remotes/az/detach_disk \
src/vmm_mad/remotes/az/attach_nic \
src/vmm_mad/remotes/az/detach_nic \
src/vmm_mad/remotes/az/snapshot_create \
src/vmm_mad/remotes/az/snapshot_revert \
src/vmm_mad/remotes/az/snapshot_delete \
src/vmm_mad/remotes/az/deploy \
src/vmm_mad/remotes/az/migrate \
src/vmm_mad/remotes/az/restore \
src/vmm_mad/remotes/az/reboot \
src/vmm_mad/remotes/az/reset \
src/vmm_mad/remotes/az/save \
src/vmm_mad/remotes/az/shutdown \
src/vmm_mad/remotes/az/reconfigure \
src/vmm_mad/remotes/az/prereconfigure \
src/vmm_mad/remotes/az/resize_disk"
#------------------------------------------------------------------------------
# VMM Driver opennebula scripts, to be installed under $REMOTES_LOCATION/vmm/one
#------------------------------------------------------------------------------
VMM_EXEC_ONE_SCRIPTS="src/vmm_mad/remotes/one/cancel \
src/vmm_mad/remotes/one/attach_disk \
src/vmm_mad/remotes/one/detach_disk \
src/vmm_mad/remotes/one/attach_nic \
src/vmm_mad/remotes/one/detach_nic \
src/vmm_mad/remotes/one/snapshot_create \
src/vmm_mad/remotes/one/snapshot_revert \
src/vmm_mad/remotes/one/snapshot_delete \
src/vmm_mad/remotes/one/deploy \
src/vmm_mad/remotes/one/migrate \
src/vmm_mad/remotes/one/migrate_local \
src/vmm_mad/remotes/one/restore \
src/vmm_mad/remotes/one/reboot \
src/vmm_mad/remotes/one/reset \
src/vmm_mad/remotes/one/save \
src/vmm_mad/remotes/one/shutdown \
src/vmm_mad/remotes/one/reconfigure \
src/vmm_mad/remotes/one/prereconfigure"
#------------------------------------------------------------------------------
# VMM Driver Equinix scripts, to be installed under $REMOTES_LOCATION/vmm/equinix
2018-11-29 17:14:17 +03:00
#------------------------------------------------------------------------------
VMM_EXEC_EQUINIX_SCRIPTS="src/vmm_mad/remotes/equinix/cancel \
src/vmm_mad/remotes/equinix/deploy \
src/vmm_mad/remotes/equinix/reboot \
src/vmm_mad/remotes/equinix/reset \
src/vmm_mad/remotes/equinix/poll \
src/vmm_mad/remotes/equinix/shutdown"
#-------------------------------------------------------------------------------
# Information Manager Probes, to be installed under $REMOTES_LOCATION/im
#-------------------------------------------------------------------------------
IM_PROBES_FILES=" \
    src/im_mad/remotes/run_monitord_client \
    src/im_mad/remotes/stop_monitord_client"

IM_PROBES_LIB_FILES=" \
    src/im_mad/remotes/lib/kvm.rb \
    src/im_mad/remotes/lib/lxd.rb \
    src/im_mad/remotes/lib/lxc.rb \
    src/im_mad/remotes/lib/linux.rb \
    src/im_mad/remotes/lib/firecracker.rb \
    src/im_mad/remotes/lib/numa_common.rb \
    src/im_mad/remotes/lib/probe_db.rb \
    src/im_mad/remotes/lib/vcenter_monitor.rb \
    src/im_mad/remotes/lib/vcenter_cluster.rb \
    src/im_mad/remotes/lib/vcenter_monitor_vms.rb \
    src/im_mad/remotes/lib/monitord_client.rb \
    src/im_mad/remotes/lib/domain.rb \
    src/im_mad/remotes/lib/process_list.rb"
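
#-------------------------------------------------------------------------------
# Every hypervisor probe set below follows the same source layout:
#   <hv>.d/                              -> monitord client start/stop scripts
#   <hv>-probes.d/host/{beacon,monitor,system}
#   <hv>-probes.d/vm/{monitor,status[,snapshot]}
# A minimal sketch of creating that tree, assuming the install destination
# mirrors the source layout under $REMOTES_LOCATION/im (the helper name is an
# illustrative assumption, not part of this installer):
#
#   make_probe_dirs "$REMOTES_LOCATION" kvm
#-------------------------------------------------------------------------------
make_probe_dirs() {
    # $1: remotes root, $2: hypervisor name (kvm, qemu, lxd, lxc, firecracker)
    local root="$1" hv="$2" d
    mkdir -p "$root/im/$hv.d"
    for d in beacon monitor system; do
        mkdir -p "$root/im/$hv-probes.d/host/$d"
    done
    for d in monitor status snapshot; do
        mkdir -p "$root/im/$hv-probes.d/vm/$d"
    done
}
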
# KVM PROBES
IM_PROBES_KVM_FILES=" \
    src/im_mad/remotes/kvm.d/monitord-client_control.sh \
    src/im_mad/remotes/kvm.d/monitord-client.rb"
IM_PROBES_KVM_HOST_BEACON_FILES=" \
    src/im_mad/remotes/kvm-probes.d/host/beacon/monitord-client-shepherd.sh \
    src/im_mad/remotes/kvm-probes.d/host/beacon/date.sh"
IM_PROBES_KVM_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/kvm-probes.d/host/monitor/linux_usage.rb \
    src/im_mad/remotes/kvm-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_KVM_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/kvm-probes.d/host/system/architecture.sh \
    src/im_mad/remotes/kvm-probes.d/host/system/cpu.sh \
    src/im_mad/remotes/kvm-probes.d/host/system/cpu_features.sh \
    src/im_mad/remotes/kvm-probes.d/host/system/linux_host.rb \
    src/im_mad/remotes/kvm-probes.d/host/system/machines_models.rb \
    src/im_mad/remotes/kvm-probes.d/host/system/monitor_ds.rb \
    src/im_mad/remotes/kvm-probes.d/host/system/name.sh \
    src/im_mad/remotes/kvm-probes.d/host/system/numa_host.rb \
    src/im_mad/remotes/kvm-probes.d/host/system/wild_vm.rb \
    src/im_mad/remotes/kvm-probes.d/host/system/pci.rb \
    src/im_mad/remotes/kvm-probes.d/host/system/version.sh"
IM_PROBES_KVM_VM_MONITOR_FILES=" \
    src/im_mad/remotes/kvm-probes.d/vm/monitor/poll.rb \
    src/im_mad/remotes/kvm-probes.d/vm/monitor/monitor_ds_vm.rb"
IM_PROBES_KVM_VM_STATUS_FILES=" \
    src/im_mad/remotes/kvm-probes.d/vm/status/state.rb"
IM_PROBES_KVM_VM_SNAPSHOT_FILES=" \
    src/im_mad/remotes/kvm-probes.d/vm/snapshot/recovery.rb"
IM_PROBES_ETC_KVM_PROBES_FILES=" \
    src/im_mad/remotes/kvm-probes.d/pci.conf \
    src/im_mad/remotes/lib/probe_db.conf"

# QEMU PROBES
IM_PROBES_QEMU_FILES=" \
    src/im_mad/remotes/qemu.d/monitord-client_control.sh \
    src/im_mad/remotes/qemu.d/monitord-client.rb"
IM_PROBES_QEMU_HOST_BEACON_FILES=" \
    src/im_mad/remotes/qemu-probes.d/host/beacon/monitord-client-shepherd.sh \
    src/im_mad/remotes/qemu-probes.d/host/beacon/date.sh"
IM_PROBES_QEMU_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/qemu-probes.d/host/monitor/linux_usage.rb \
    src/im_mad/remotes/qemu-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_QEMU_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/qemu-probes.d/host/system/architecture.sh \
    src/im_mad/remotes/qemu-probes.d/host/system/cpu.sh \
    src/im_mad/remotes/qemu-probes.d/host/system/linux_host.rb \
    src/im_mad/remotes/qemu-probes.d/host/system/machines_models.rb \
    src/im_mad/remotes/qemu-probes.d/host/system/monitor_ds.rb \
    src/im_mad/remotes/qemu-probes.d/host/system/name.sh \
    src/im_mad/remotes/qemu-probes.d/host/system/numa_host.rb \
    src/im_mad/remotes/qemu-probes.d/host/system/wild_vm.rb \
    src/im_mad/remotes/qemu-probes.d/host/system/pci.rb \
    src/im_mad/remotes/qemu-probes.d/host/system/version.sh"
IM_PROBES_QEMU_VM_MONITOR_FILES=" \
    src/im_mad/remotes/qemu-probes.d/vm/monitor/poll.rb \
    src/im_mad/remotes/qemu-probes.d/vm/monitor/monitor_ds_vm.rb"
IM_PROBES_QEMU_VM_STATUS_FILES=" \
    src/im_mad/remotes/qemu-probes.d/vm/status/state.rb"
IM_PROBES_QEMU_VM_SNAPSHOT_FILES=" \
    src/im_mad/remotes/qemu-probes.d/vm/snapshot/recovery.rb"
IM_PROBES_ETC_QEMU_PROBES_FILES=" \
    src/im_mad/remotes/qemu-probes.d/pci.conf \
    src/im_mad/remotes/lib/probe_db.conf"
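
#-------------------------------------------------------------------------------
# The QEMU probe lists above closely mirror the KVM ones. A minimal sketch of a
# helper that makes it easy to spot when the two sets drift apart (the helper
# name is an illustrative assumption, not part of this installer):
#
#   diff <(list_basenames $IM_PROBES_KVM_VM_MONITOR_FILES) \
#        <(list_basenames $IM_PROBES_QEMU_VM_MONITOR_FILES)
#-------------------------------------------------------------------------------
list_basenames() {
    # Print the basename of every path given as an argument, sorted.
    local f
    for f in "$@"; do
        basename "$f"
    done | sort
}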

# DUMMY PROBES
IM_PROBES_DUMMY_FILES=" \
    src/im_mad/remotes/dummy.d/monitord-client_control.sh \
    src/im_mad/remotes/dummy.d/monitord-client.rb"
IM_PROBES_DUMMY_HOST_BEACON_FILES=" \
    src/im_mad/remotes/dummy-probes.d/host/beacon/monitord-client-shepherd_local.sh \
    src/im_mad/remotes/dummy-probes.d/host/beacon/date.sh"
IM_PROBES_DUMMY_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/dummy-probes.d/host/monitor/monitor.rb"
IM_PROBES_DUMMY_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/dummy-probes.d/host/system/system.rb"
IM_PROBES_DUMMY_VM_MONITOR_FILES=" \
    src/im_mad/remotes/dummy-probes.d/vm/monitor/monitor.rb"
IM_PROBES_DUMMY_VM_STATUS_FILES=""

# LXD PROBES
IM_PROBES_LXD_FILES=" \
    src/im_mad/remotes/lxd.d/monitord-client_control.sh \
    src/im_mad/remotes/lxd.d/monitord-client.rb"
IM_PROBES_LXD_HOST_BEACON_FILES=" \
    src/im_mad/remotes/lxd-probes.d/host/beacon/monitord-client-shepherd.sh \
    src/im_mad/remotes/lxd-probes.d/host/beacon/date.sh"
IM_PROBES_LXD_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/lxd-probes.d/host/monitor/linux_usage.rb \
    src/im_mad/remotes/lxd-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_LXD_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/lxd-probes.d/host/system/architecture.sh \
    src/im_mad/remotes/lxd-probes.d/host/system/cpu.sh \
    src/im_mad/remotes/lxd-probes.d/host/system/linux_host.rb \
    src/im_mad/remotes/lxd-probes.d/host/system/monitor_ds.rb \
    src/im_mad/remotes/lxd-probes.d/host/system/name.sh \
    src/im_mad/remotes/lxd-probes.d/host/system/numa_host.rb \
    src/im_mad/remotes/lxd-probes.d/host/system/wild_vm.rb \
    src/im_mad/remotes/lxd-probes.d/host/system/pci.rb \
    src/im_mad/remotes/lxd-probes.d/host/system/profiles.sh \
    src/im_mad/remotes/lxd-probes.d/host/system/version.sh"
IM_PROBES_LXD_VM_MONITOR_FILES=" \
    src/im_mad/remotes/lxd-probes.d/vm/monitor/poll.rb \
    src/im_mad/remotes/lxd-probes.d/vm/monitor/monitor_ds_vm.rb"
IM_PROBES_LXD_VM_STATUS_FILES=" \
    src/im_mad/remotes/lxd-probes.d/vm/status/state.rb"
IM_PROBES_ETC_LXD_PROBES_FILES=" \
    src/im_mad/remotes/lxd-probes.d/pci.conf \
    src/im_mad/remotes/lib/probe_db.conf"

# LXC PROBES
IM_PROBES_LXC_FILES=" \
    src/im_mad/remotes/lxc.d/monitord-client_control.sh \
    src/im_mad/remotes/lxc.d/monitord-client.rb"
IM_PROBES_LXC_HOST_BEACON_FILES=" \
    src/im_mad/remotes/lxc-probes.d/host/beacon/monitord-client-shepherd.sh \
    src/im_mad/remotes/lxc-probes.d/host/beacon/date.sh"
IM_PROBES_LXC_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/lxc-probes.d/host/monitor/linux_usage.rb \
    src/im_mad/remotes/lxc-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_LXC_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/lxc-probes.d/host/system/architecture.sh \
    src/im_mad/remotes/lxc-probes.d/host/system/cpu.sh \
    src/im_mad/remotes/lxc-probes.d/host/system/linux_host.rb \
    src/im_mad/remotes/lxc-probes.d/host/system/monitor_ds.rb \
    src/im_mad/remotes/lxc-probes.d/host/system/name.sh \
    src/im_mad/remotes/lxc-probes.d/host/system/numa_host.rb \
    src/im_mad/remotes/lxc-probes.d/host/system/version.sh"
IM_PROBES_LXC_VM_MONITOR_FILES=" \
    src/im_mad/remotes/lxc-probes.d/vm/monitor/poll.rb \
    src/im_mad/remotes/lxc-probes.d/vm/monitor/monitor_ds_vm.rb"
IM_PROBES_LXC_VM_STATUS_FILES=" \
    src/im_mad/remotes/lxc-probes.d/vm/status/state.rb"
IM_PROBES_ETC_LXC_PROBES_FILES=" \
    src/im_mad/remotes/lib/probe_db.conf"

# Firecracker PROBES
IM_PROBES_FIRECRACKER_FILES=" \
    src/im_mad/remotes/firecracker.d/monitord-client_control.sh \
    src/im_mad/remotes/firecracker.d/monitord-client.rb"
IM_PROBES_FIRECRACKER_HOST_BEACON_FILES=" \
    src/im_mad/remotes/firecracker-probes.d/host/beacon/monitord-client-shepherd.sh \
    src/im_mad/remotes/firecracker-probes.d/host/beacon/date.sh"
IM_PROBES_FIRECRACKER_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/firecracker-probes.d/host/monitor/linux_usage.rb \
    src/im_mad/remotes/firecracker-probes.d/host/monitor/numa_usage.rb"
IM_PROBES_FIRECRACKER_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/firecracker-probes.d/host/system/architecture.sh \
    src/im_mad/remotes/firecracker-probes.d/host/system/cpu.sh \
    src/im_mad/remotes/firecracker-probes.d/host/system/linux_host.rb \
    src/im_mad/remotes/firecracker-probes.d/host/system/monitor_ds.rb \
    src/im_mad/remotes/firecracker-probes.d/host/system/name.sh \
    src/im_mad/remotes/firecracker-probes.d/host/system/numa_host.rb \
    src/im_mad/remotes/firecracker-probes.d/host/system/version.sh"
IM_PROBES_FIRECRACKER_VM_MONITOR_FILES=" \
    src/im_mad/remotes/firecracker-probes.d/vm/monitor/poll.rb \
    src/im_mad/remotes/firecracker-probes.d/vm/monitor/monitor_ds_vm.rb"
IM_PROBES_FIRECRACKER_VM_STATUS_FILES=" \
    src/im_mad/remotes/firecracker-probes.d/vm/status/state.rb"
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES="src/im_mad/remotes/lib/probe_db.conf"

IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/monitord-client_control.sh"

# EC2 monitord-client
IM_PROBES_EC2_FILES=" \
    src/im_mad/remotes/ec2.d/monitord-client_control.sh \
    src/im_mad/remotes/ec2.d/monitord-client.rb"

# EC2 probes
IM_PROBES_EC2_HOST_BEACON_FILES=" \
    src/im_mad/remotes/ec2-probes.d/host/beacon/monitord-client-shepherd_local.sh"
IM_PROBES_EC2_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/ec2-probes.d/host/monitor/probe_host_monitor.rb"
IM_PROBES_EC2_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/ec2-probes.d/host/system/probe_host_system.rb"
IM_PROBES_EC2_VM_MONITOR_FILES=" \
    src/im_mad/remotes/ec2-probes.d/vm/monitor/probe_vm_monitor.rb"
IM_PROBES_EC2_VM_STATUS_FILES=" \
    src/im_mad/remotes/ec2-probes.d/vm/status/probe_vm_status.rb"

# AZ monitord-client
IM_PROBES_AZ_FILES=" \
    src/im_mad/remotes/az.d/monitord-client_control.sh \
    src/im_mad/remotes/az.d/monitord-client.rb"

# AZ probes
IM_PROBES_AZ_HOST_BEACON_FILES=" \
    src/im_mad/remotes/az-probes.d/host/beacon/monitord-client-shepherd_local.sh"
IM_PROBES_AZ_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/az-probes.d/host/monitor/probe_host_monitor.rb"
IM_PROBES_AZ_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/az-probes.d/host/system/probe_host_system.rb"
IM_PROBES_AZ_VM_MONITOR_FILES=" \
    src/im_mad/remotes/az-probes.d/vm/monitor/probe_vm_monitor.rb"
IM_PROBES_AZ_VM_STATUS_FILES=" \
    src/im_mad/remotes/az-probes.d/vm/status/probe_vm_status.rb"

# ONE monitord-client
IM_PROBES_ONE_FILES=" \
    src/im_mad/remotes/one.d/monitord-client_control.sh \
    src/im_mad/remotes/one.d/monitord-client.rb"

# ONE probes
IM_PROBES_ONE_HOST_BEACON_FILES=" \
    src/im_mad/remotes/one-probes.d/host/beacon/monitord-client-shepherd_local.sh"
IM_PROBES_ONE_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/one-probes.d/host/monitor/probe_host_monitor.rb"
IM_PROBES_ONE_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/one-probes.d/host/system/probe_host_system.rb"
IM_PROBES_ONE_VM_MONITOR_FILES=" \
    src/im_mad/remotes/one-probes.d/vm/monitor/probe_vm_monitor.rb"
IM_PROBES_ONE_VM_STATUS_FILES=" \
    src/im_mad/remotes/one-probes.d/vm/status/probe_vm_status.rb"

IM_PROBES_EQUINIX_FILES="src/im_mad/remotes/equinix.d/poll"

# EQUINIX monitord-client
IM_PROBES_EQUINIX_FILES=" \
    src/im_mad/remotes/equinix.d/monitord-client_control.sh \
    src/im_mad/remotes/equinix.d/monitord-client.rb"

# EQUINIX probes
IM_PROBES_EQUINIX_HOST_BEACON_FILES=" \
    src/im_mad/remotes/equinix-probes.d/host/beacon/monitord-client-shepherd_local.sh"
IM_PROBES_EQUINIX_HOST_MONITOR_FILES=" \
    src/im_mad/remotes/equinix-probes.d/host/monitor/probe_host_monitor.rb"
IM_PROBES_EQUINIX_HOST_SYSTEM_FILES=" \
    src/im_mad/remotes/equinix-probes.d/host/system/probe_host_system.rb"
IM_PROBES_EQUINIX_VM_MONITOR_FILES=" \
    src/im_mad/remotes/equinix-probes.d/vm/monitor/probe_vm_monitor.rb"
IM_PROBES_EQUINIX_VM_STATUS_FILES=" \
    src/im_mad/remotes/equinix-probes.d/vm/status/probe_vm_status.rb"

IM_PROBES_VERSION="src/im_mad/remotes/VERSION"

#-------------------------------------------------------------------------------
# Auth Manager drivers to be installed under $REMOTES_LOCATION/auth
#-------------------------------------------------------------------------------
AUTH_SERVER_CIPHER_FILES="src/authm_mad/remotes/server_cipher/authenticate"

AUTH_SERVER_X509_FILES="src/authm_mad/remotes/server_x509/authenticate"

AUTH_X509_FILES="src/authm_mad/remotes/x509/authenticate"

AUTH_LDAP_FILES="src/authm_mad/remotes/ldap/authenticate"

AUTH_SSH_FILES="src/authm_mad/remotes/ssh/authenticate"

AUTH_DUMMY_FILES="src/authm_mad/remotes/dummy/authenticate"

AUTH_PLAIN_FILES="src/authm_mad/remotes/plain/authenticate"

#-------------------------------------------------------------------------------
# Virtual Network Manager drivers to be installed under $REMOTES_LOCATION/vnm
#-------------------------------------------------------------------------------
NETWORK_FILES="src/vnm_mad/remotes/lib/vnm_driver.rb \
    src/vnm_mad/remotes/lib/vnmmad.rb \
    src/vnm_mad/remotes/lib/sg_driver.rb \
    src/vnm_mad/remotes/lib/address.rb \
    src/vnm_mad/remotes/lib/command.rb \
    src/vnm_mad/remotes/lib/vm.rb \
    src/vnm_mad/remotes/lib/vf.rb \
    src/vnm_mad/remotes/lib/vlan.rb \
    src/vnm_mad/remotes/lib/no_vlan.rb \
    src/vnm_mad/remotes/lib/security_groups.rb \
    src/vnm_mad/remotes/lib/security_groups_iptables.rb \
    src/vnm_mad/remotes/lib/nic.rb"
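
#-------------------------------------------------------------------------------
# Each network driver below ships a similar set of action scripts (pre, post,
# clean, update_sg, update_nic, vnet_create, vnet_delete, ...). A minimal
# sketch, assuming the destination simply mirrors $REMOTES_LOCATION/vnm/<driver>
# (the helper name is an illustrative assumption, not part of this installer):
#
#   install_vnm_driver "802.1Q" $NETWORK_8021Q_FILES
#-------------------------------------------------------------------------------
install_vnm_driver() {
    # $1: driver name, remaining arguments: driver action scripts
    local drv="$1" f
    shift
    mkdir -p "$REMOTES_LOCATION/vnm/$drv"
    for f in "$@"; do
        cp -f "$f" "$REMOTES_LOCATION/vnm/$drv"
    done
}
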
NETWORK_HOOKS_PRE_FILES="src/vnm_mad/remotes/hooks/pre/firecracker"
NETWORK_HOOKS_CLEAN_FILES="src/vnm_mad/remotes/hooks/clean/firecracker"
NETWORK_8021Q_FILES="src/vnm_mad/remotes/802.1Q/clean \
    src/vnm_mad/remotes/802.1Q/post \
    src/vnm_mad/remotes/802.1Q/pre \
    src/vnm_mad/remotes/802.1Q/update_sg \
    src/vnm_mad/remotes/802.1Q/update_nic \
    src/vnm_mad/remotes/802.1Q/vlan_tag_driver.rb \
    src/vnm_mad/remotes/802.1Q/vnet_create \
    src/vnm_mad/remotes/802.1Q/vnet_delete"

NETWORK_VXLAN_FILES="src/vnm_mad/remotes/vxlan/clean \
    src/vnm_mad/remotes/vxlan/post \
    src/vnm_mad/remotes/vxlan/pre \
    src/vnm_mad/remotes/vxlan/update_sg \
    src/vnm_mad/remotes/vxlan/update_nic \
    src/vnm_mad/remotes/vxlan/vxlan.rb \
    src/vnm_mad/remotes/vxlan/vxlan_driver.rb \
    src/vnm_mad/remotes/vxlan/vnet_create \
    src/vnm_mad/remotes/vxlan/vnet_delete"

NETWORK_DUMMY_FILES="src/vnm_mad/remotes/dummy/clean \
    src/vnm_mad/remotes/dummy/post \
    src/vnm_mad/remotes/dummy/update_sg \
    src/vnm_mad/remotes/dummy/pre \
    src/vnm_mad/remotes/dummy/update_nic \
    src/vnm_mad/remotes/dummy/vnet_create \
    src/vnm_mad/remotes/dummy/vnet_delete"

NETWORK_BRIDGE_FILES="src/vnm_mad/remotes/bridge/clean \
    src/vnm_mad/remotes/bridge/post \
    src/vnm_mad/remotes/bridge/update_sg \
    src/vnm_mad/remotes/bridge/pre \
    src/vnm_mad/remotes/bridge/update_nic \
    src/vnm_mad/remotes/bridge/vnet_create \
    src/vnm_mad/remotes/bridge/vnet_delete"

NETWORK_EBTABLES_FILES="src/vnm_mad/remotes/ebtables/clean \
    src/vnm_mad/remotes/ebtables/post \
    src/vnm_mad/remotes/ebtables/pre \
    src/vnm_mad/remotes/ebtables/update_sg \
    src/vnm_mad/remotes/ebtables/Ebtables.rb \
    src/vnm_mad/remotes/ebtables/vnet_create \
    src/vnm_mad/remotes/ebtables/vnet_delete"

NETWORK_FW_FILES="src/vnm_mad/remotes/fw/post \
    src/vnm_mad/remotes/fw/pre \
    src/vnm_mad/remotes/fw/update_sg \
    src/vnm_mad/remotes/fw/update_nic \
    src/vnm_mad/remotes/fw/clean \
    src/vnm_mad/remotes/fw/vnet_create \
    src/vnm_mad/remotes/fw/vnet_delete"

NETWORK_OVSWITCH_FILES="src/vnm_mad/remotes/ovswitch/clean \
    src/vnm_mad/remotes/ovswitch/post \
    src/vnm_mad/remotes/ovswitch/pre \
    src/vnm_mad/remotes/ovswitch/update_sg \
    src/vnm_mad/remotes/ovswitch/update_nic \
    src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb \
    src/vnm_mad/remotes/ovswitch/vnet_create \
    src/vnm_mad/remotes/ovswitch/vnet_delete"

NETWORK_OVSWITCH_VXLAN_FILES="src/vnm_mad/remotes/ovswitch_vxlan/clean \
    src/vnm_mad/remotes/ovswitch_vxlan/post \
    src/vnm_mad/remotes/ovswitch_vxlan/pre \
    src/vnm_mad/remotes/ovswitch_vxlan/update_sg \
    src/vnm_mad/remotes/ovswitch_vxlan/update_nic \
    src/vnm_mad/remotes/ovswitch_vxlan/OpenvSwitchVXLAN.rb \
    src/vnm_mad/remotes/ovswitch_vxlan/vnet_create \
    src/vnm_mad/remotes/ovswitch_vxlan/vnet_delete"

NETWORK_VCENTER_FILES="src/vnm_mad/remotes/vcenter/pre \
    src/vnm_mad/remotes/vcenter/post \
    src/vnm_mad/remotes/vcenter/clean \
    src/vnm_mad/remotes/vcenter/update_sg \
    src/vnm_mad/remotes/vcenter/update_nic \
    src/vnm_mad/remotes/vcenter/virtual_network_xml.rb \
    src/vnm_mad/remotes/vcenter/vnet_create \
    src/vnm_mad/remotes/vcenter/vnet_delete"

NETWORK_ELASTIC_FILES="src/vnm_mad/remotes/elastic/elastic.rb \
    src/vnm_mad/remotes/elastic/clean \
    src/vnm_mad/remotes/elastic/remote_clean \
    src/vnm_mad/remotes/elastic/post \
    src/vnm_mad/remotes/elastic/remote_post \
    src/vnm_mad/remotes/elastic/pre \
    src/vnm_mad/remotes/elastic/update_sg \
    src/vnm_mad/remotes/elastic/update_nic \
    src/vnm_mad/remotes/elastic/vnet_create \
    src/vnm_mad/remotes/elastic/vnet_delete"

NETWORK_NODEPORT_FILES="src/vnm_mad/remotes/nodeport/nodeport.rb \
    src/vnm_mad/remotes/nodeport/clean \
    src/vnm_mad/remotes/nodeport/post \
    src/vnm_mad/remotes/nodeport/pre \
    src/vnm_mad/remotes/nodeport/update_sg \
    src/vnm_mad/remotes/nodeport/update_nic \
    src/vnm_mad/remotes/nodeport/vnet_create \
    src/vnm_mad/remotes/nodeport/vnet_delete"

#-------------------------------------------------------------------------------
# Virtual Network Manager drivers configuration to be installed under $REMOTES_LOCATION/etc/vnm
#-------------------------------------------------------------------------------
NETWORK_ETC_FILES="src/vnm_mad/remotes/OpenNebulaNetwork.conf"

#-------------------------------------------------------------------------------
# IPAM dummy drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------
IPAM_DRIVER_DUMMY_SCRIPTS="src/ipamm_mad/remotes/dummy/register_address_range \
    src/ipamm_mad/remotes/dummy/unregister_address_range \
    src/ipamm_mad/remotes/dummy/allocate_address \
    src/ipamm_mad/remotes/dummy/get_address \
    src/ipamm_mad/remotes/dummy/free_address"
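
#-------------------------------------------------------------------------------
# Every IPAM driver in this section is expected to provide the same five
# actions (register_address_range, unregister_address_range, allocate_address,
# get_address, free_address). A minimal sketch of checking that a driver
# directory is complete (helper name illustrative, not part of this installer):
#
#   check_ipam_driver src/ipamm_mad/remotes/dummy
#-------------------------------------------------------------------------------
check_ipam_driver() {
    # $1: IPAM driver source directory
    local dir="$1" action
    for action in register_address_range unregister_address_range \
                  allocate_address get_address free_address; do
        [ -e "$dir/$action" ] || echo "missing IPAM action: $dir/$action" >&2
    done
}
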
#-------------------------------------------------------------------------------
# IPAM Equinix drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------
IPAM_DRIVER_EQUINIX_SCRIPTS="src/ipamm_mad/remotes/equinix/register_address_range \
src/ipamm_mad/remotes/equinix/unregister_address_range \
src/ipamm_mad/remotes/equinix/allocate_address \
src/ipamm_mad/remotes/equinix/get_address \
src/ipamm_mad/remotes/equinix/free_address"
#-------------------------------------------------------------------------------
# IPAM Vultr drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------
IPAM_DRIVER_VULTR_SCRIPTS="src/ipamm_mad/remotes/vultr/register_address_range \
src/ipamm_mad/remotes/vultr/unregister_address_range \
src/ipamm_mad/remotes/vultr/allocate_address \
src/ipamm_mad/remotes/vultr/get_address \
src/ipamm_mad/remotes/vultr/free_address"
#-------------------------------------------------------------------------------
# IPAM EC2 drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------
IPAM_DRIVER_EC2_SCRIPTS="src/ipamm_mad/remotes/aws/register_address_range \
src/ipamm_mad/remotes/aws/unregister_address_range \
src/ipamm_mad/remotes/aws/allocate_address \
src/ipamm_mad/remotes/aws/get_address \
src/ipamm_mad/remotes/aws/free_address"
#-------------------------------------------------------------------------------
# Transfer Manager commands, to be installed under $LIB_LOCATION/tm_commands
# - SHARED TM, $VAR_LOCATION/tm/shared
# - FS_LVM TM, $VAR_LOCATION/tm/fs_lvm
# - QCOW2 TM, $VAR_LOCATION/tm/qcow2
# - SSH TM, $VAR_LOCATION/tm/ssh
# - DUMMY TM, $VAR_LOCATION/tm/dummy
# - CEPH TM, $VAR_LOCATION/tm/ceph
# - DEV TM, $VAR_LOCATION/tm/dev
# - ISCSI TM, $VAR_LOCATION/tm/iscsi_libvirt
#-------------------------------------------------------------------------------
TM_FILES="src/tm_mad/tm_common.sh"
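
#-------------------------------------------------------------------------------
# A minimal sketch of how one TM driver directory listed above could be laid
# out under $VAR_LOCATION/tm/<driver>. The helper name is illustrative and not
# part of this installer; it assumes the driver scripts live in
# src/tm_mad/<driver> in the source tree:
#
#   install_tm_driver ceph
#-------------------------------------------------------------------------------
install_tm_driver() {
    # $1: TM driver name (shared, fs_lvm, qcow2, ssh, dummy, ceph, dev, ...)
    local drv="$1" f
    mkdir -p "$VAR_LOCATION/tm/$drv"
    for f in src/tm_mad/"$drv"/*; do
        [ -f "$f" ] && cp -f "$f" "$VAR_LOCATION/tm/$drv"
    done
    return 0
}
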
F #5516: New backup interface for OpenNebula
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Neal Hansen <nhansen@opennebula.io>
co-authored-by: Daniel Clavijo Coca <dclavijo@opennebula.io>
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
BACKUP INTERFACE
=================
* Backups are exposed through a a special Datastore (BACKUP_DS) and
Image (BACKUP) types. These new types can only be used for backup'ing
up VMs. This approach allows to:
- Implement tier based backup policies (backups made on different
locations).
- Leverage access control and quota systems
- Support differnt storage and backup technologies
* Backup interface for the VMs:
- VM configures backups with BACKUP_CONFIG. This attribute can be set
in the VM template or updated with updateconf API call. It can include:
+ BACKUP_VOLATILE: To backup or not volatile disks
+ FS_FREEZE: How the FS is freeze for running VMs (qemu-agent,
suspend or none). When possible backups are crash consistent.
+ KEEP_LAST: keep only a given number of backups.
- Backups are initiated by the one.vm.backup API call that requires
the target Datastore to perform the backup (one-shot). This is
exposed by the onevm backup command.
- Backups can be periodic through scheduled actions.
- Backup configuration is updated with one.vm.updateconf API call.
* Restore interface:
- Restores are initiated by the one.image.restore API call. This is
exposed by oneimage restore command.
- Restore include configurable options for the VM template
+ NO_IP: to not preserve IP addresses (but keep the NICs and network
mapping)
+ NO_NIC: to not preserve network mappings
- Other template attributes:
+ Clean PCI devices, including network configuration in case of TYPE=NIC
attributes. By default it removes SHORT_ADDRESS and leave the "auto"
selection attributes.
+ Clean NUMA_NODE, removes node id and cpu sets. It keeps the NUMA node
- It is possible to restore single files stored in the repository by
using the backup specific URL.
* Sunstone (Ruby version) has been updated to expose this feautres.
BACKUP DRIVERS & IMPLEMENTATION
===============================
* Backup operation is implemented by a combination of 3 driver operations:
- VMM. New (internal oned <-> one_vmm_exec.rb) to orchestrate
backups for RUNNING VMs.
- TM. This commit introduces 2 new operations (and their
corresponding _live variants):
+ pre_backup(_live): Prepares the disks to be back'ed up in the
repository. It is specific to the driver: (i) ceph uses the export
operation; (ii) qcow2/raw uses snapshot-create-as and fs_freeze as
needed.
+ post_backup(_live): Performs cleanning operations, i.e. KVM
snapshots or tmp dirs.
- DATASTORE. Each backup technology is represented by its
corresponfing driver, that needs to implement:
+ backup: it takes the VM disks in file (qcow2) format and stores it
the backup repository.
+ restore: it takes a backup image and restores the associated disks
and VM template.
+ monitor: to gather available space in the repository
+ rm: to remove existing backups
+ stat: to return the "restored" size of a disk stored in a backup
+ downloader pseudo-URL handler: in the form
<backup_proto>://<driver_snapshot_id>/<disk filename>
BACKUP MANAGEMENT
=================
Backup actions may potentially take some time, leaving some vmm_exec threads in
use for a long time, stucking other vmm operations. Backups are planned
by the scheduler through the sched action interface.
Two attributes has been added to sched.conf:
* MAX_BACKUPS max active backup operations in the cloud. No more
backups will be started beyond this limit.
* MAX_BACKUPS_HOST max number of backups per host
* Fix onevm CLI to properly show and manage schedule actions. --schedule
supports now, as well as relative times +<seconds_from_stime>
onvm backup --schedule now -d 100 63
* Backup is added as VM_ADMIN_ACTIONS in oned.conf. Regular users needs
to use the batch interface or request specific permissions
Internal restructure of Scheduler:
- All sched_actions interface is now in SchedActionsXML class and files.
This class uses references to VM XML, and MUST be used in the same
lifetime scope.
- XMLRPC API calls for sched actions has been moved to ScheduledActionXML.cc as
static functions.
- VirtualMachineActionPool includes counters for active backups (total
and per host).
SUPPORTED PLATFORMS
====================
* hypervisor: KVM
* TM: qcow2/shared/ssh, ceph
* backup: restic, rsync
Notes on Ceph
* Ceph backups are performed in the following steps:
1. A snapshot of each disk is taken (group snapshots cannot be used as
it seems we cannot export the disks afterwards)
2. Disks are export to a file
3. File is converted to qcow2 format
4. Disk files are upload to the backup repo
TODO:
* Confirm crash consistent snapshots cannot be used in Ceph
TODO:
* Check if using VM dir instead of full path is better to accomodate
DS migrations i.e.:
- Current path: /var/lib/one/datastores/100/53/backup/disk.0
- Proposal: 53/backup/disk.0
RESTIC DRIVER
=============
Developed together with this feature is part of the EE edtion.
* It supports the SFTP protocol, the following attributes are
supported:
- RESTIC_SFTP_SERVER
- RESTIC_SFTP_USER: only if different from oneadmin
- RESTIC_PASSWORD
- RESTIC_IONICE: Run restic under a given ionice priority (class 2)
- RESTIC_NICE: Run restic under a given nice
- RESTIC_BWLIMIT: Limit restic upload/download BW
- RESTIC_COMPRESSION: Restic 0.14 implements compression (three modes:
off, auto, max). This requires repositories version 2. By default,
auto is used (average compression without to much CPU usage)
- RESTIC_CONNECTIONS: Sets the number of concurrent connections to a
backend (5 by default). For high-latency backends this number can be
increased.
* downloader URL: restic://<datastore_id>/<snapshot_id>/<file_name>
snapshot_id is the restic snapshot hash. To recover single disk images
from a backup. This URLs support:
- RESTIC_CONNECTIONS
- RESTIC_BWLIMIT
- RESTIC_IONICE
- RESTIC_NICE
These options needs to be defined in the associated datastore.
RSYNC DRIVER
=============
A rsync driver is included as part of the CE distribution. It uses the
rsync tool to store backups in a remote server through SSH:
* The following attributes are supported to configure the backup
datastore:
- RSYNC_HOST
- RSYNC_USER
- RSYNC_ARGS: Arguments to perform the rsync operatin (-aS by default)
* downloader URL: rsync://<ds_id>/<vmid>/<hash>/<file> can be used to recover
single files from an existing backup. (RSYNC_HOST and RSYN_USER needs
to be set in ds_id
EMULATOR_CPUS
=============
This commit includes a non related backup feature:
* Add EMULATOR_CPUS (KVM). This host (or cluster attribute) defines the
CPU IDs where the emulator threads will be pinned. If this value is
not defined the allocated CPU wll be used when using a PIN policy.
(cherry picked from commit a9e6a8e000e9a5a2f56f80ce622ad9ffc9fa032b)
F OpenNebula/one#5516: adding rsync backup driver
(cherry picked from commit fb52edf5d009dc02b071063afb97c6519b9e8305)
F OpenNebula/one#5516: update install.sh, add vmid to source, some polish
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 6fc6f8a67e435f7f92d5c40fdc3d1c825ab5581d)
F OpenNebula/one#5516: cleanup
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 12f4333b833f23098142cd4762eb9e6c505e1340)
F OpenNebula/one#5516: update downloader, default args, size check
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 510124ef2780a4e2e8c3d128c9a42945be38a305)
(cherry picked from commit d4fcd134dc293f2b862086936db4d552792539fa)
2022-09-09 12:46:44 +03:00
TM_LIB_FILES="src/tm_mad/lib/kvm.rb \
src/tm_mad/lib/tm_action.rb \
2022-11-07 00:54:17 +03:00
src/tm_mad/lib/backup_qcow2.rb \
2023-01-21 15:36:59 +03:00
src/tm_mad/lib/datastore.rb \
2022-09-09 12:46:44 +03:00
src/tm_mad/lib/backup.rb"
2012-02-29 01:52:48 +04:00
TM_SHARED_FILES="src/tm_mad/shared/clone \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/clone.ssh \
2012-09-24 18:18:17 +04:00
src/tm_mad/shared/delete \
src/tm_mad/shared/ln \
2020-02-21 18:11:23 +03:00
src/tm_mad/shared/ln.ssh \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/monitor \
2012-09-24 18:18:17 +04:00
src/tm_mad/shared/mkswap \
src/tm_mad/shared/mkimage \
src/tm_mad/shared/mv \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/mv.ssh \
2012-09-24 18:18:17 +04:00
src/tm_mad/shared/context \
src/tm_mad/shared/premigrate \
src/tm_mad/shared/postmigrate \
2015-06-04 22:43:32 +03:00
src/tm_mad/shared/failmigrate \
2013-03-07 19:48:23 +04:00
src/tm_mad/shared/mvds \
2020-02-21 18:11:23 +03:00
src/tm_mad/shared/mvds.ssh \
2015-06-08 16:00:32 +03:00
src/tm_mad/shared/snap_create \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/snap_create.ssh \
2015-08-12 13:31:58 +03:00
src/tm_mad/shared/snap_create_live \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/snap_create_live.ssh \
2015-06-08 16:00:32 +03:00
src/tm_mad/shared/snap_delete \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/snap_delete.ssh \
2015-06-08 16:00:32 +03:00
src/tm_mad/shared/snap_revert \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/snap_revert.ssh \
2016-11-23 21:45:40 +03:00
src/tm_mad/shared/cpds \
2021-05-26 19:21:13 +03:00
src/tm_mad/shared/cpds.ssh \
2022-09-09 12:46:44 +03:00
src/tm_mad/shared/resize \
src/tm_mad/shared/prebackup_live \
src/tm_mad/shared/prebackup \
src/tm_mad/shared/postbackup_live \
src/tm_mad/shared/postbackup"
TM_QCOW2_FILES="${TM_SHARED_FILES}"
2012-02-29 01:52:48 +04:00
2018-01-31 18:54:50 +03:00
TM_FS_LVM_FILES="src/tm_mad/fs_lvm/activate \
src/tm_mad/fs_lvm/clone \
2016-04-07 12:12:57 +03:00
src/tm_mad/fs_lvm/context \
2013-11-11 20:43:57 +04:00
src/tm_mad/fs_lvm/ln \
2016-04-07 12:12:57 +03:00
src/tm_mad/fs_lvm/monitor \
2016-04-07 20:26:48 +03:00
src/tm_mad/fs_lvm/mkswap \
src/tm_mad/fs_lvm/mkimage \
2013-11-11 20:43:57 +04:00
src/tm_mad/fs_lvm/mv \
src/tm_mad/fs_lvm/mvds \
src/tm_mad/fs_lvm/cpds \
src/tm_mad/fs_lvm/premigrate \
src/tm_mad/fs_lvm/postmigrate \
2015-06-08 16:00:32 +03:00
src/tm_mad/fs_lvm/snap_create \
2015-08-12 13:31:58 +03:00
src/tm_mad/fs_lvm/snap_create_live \
2015-06-08 16:00:32 +03:00
src/tm_mad/fs_lvm/snap_delete \
src/tm_mad/fs_lvm/snap_revert \
2015-06-04 22:43:32 +03:00
src/tm_mad/fs_lvm/failmigrate \
2016-11-24 20:03:42 +03:00
src/tm_mad/fs_lvm/delete \
2022-09-09 12:46:44 +03:00
src/tm_mad/fs_lvm/resize \
src/tm_mad/fs_lvm/prebackup_live \
src/tm_mad/fs_lvm/prebackup \
src/tm_mad/fs_lvm/postbackup_live \
src/tm_mad/fs_lvm/postbackup"
2013-09-16 18:22:36 +04:00
2019-02-04 17:55:54 +03:00
TM_FS_LVM_ETC_FILES="src/tm_mad/fs_lvm/fs_lvm.conf"
2021-05-12 11:57:01 +03:00
TM_FS_LVM_SSH_FILES="src/tm_mad/fs_lvm_ssh/activate \
src/tm_mad/fs_lvm_ssh/clone \
src/tm_mad/fs_lvm_ssh/context \
src/tm_mad/fs_lvm_ssh/ln \
src/tm_mad/fs_lvm_ssh/monitor \
src/tm_mad/fs_lvm_ssh/mkswap \
src/tm_mad/fs_lvm_ssh/mkimage \
src/tm_mad/fs_lvm_ssh/mv \
src/tm_mad/fs_lvm_ssh/mvds \
src/tm_mad/fs_lvm_ssh/cpds \
src/tm_mad/fs_lvm_ssh/premigrate \
src/tm_mad/fs_lvm_ssh/postmigrate \
src/tm_mad/fs_lvm_ssh/snap_create \
src/tm_mad/fs_lvm_ssh/snap_create_live \
src/tm_mad/fs_lvm_ssh/snap_delete \
src/tm_mad/fs_lvm_ssh/snap_revert \
src/tm_mad/fs_lvm_ssh/failmigrate \
src/tm_mad/fs_lvm_ssh/delete \
2022-09-09 12:46:44 +03:00
src/tm_mad/fs_lvm_ssh/resize \
src/tm_mad/fs_lvm_ssh/prebackup_live \
src/tm_mad/fs_lvm_ssh/prebackup \
src/tm_mad/fs_lvm_ssh/postbackup_live \
src/tm_mad/fs_lvm_ssh/postbackup"
2012-03-23 23:26:53 +04:00
2012-02-29 01:52:48 +04:00
TM_SSH_FILES="src/tm_mad/ssh/clone \
2020-10-02 13:58:18 +03:00
src/tm_mad/ssh/clone.replica \
2012-09-24 18:18:17 +04:00
src/tm_mad/ssh/delete \
src/tm_mad/ssh/ln \
2020-10-02 13:58:18 +03:00
src/tm_mad/ssh/ln.replica \
2012-09-24 18:18:17 +04:00
src/tm_mad/ssh/mkswap \
src/tm_mad/ssh/mkimage \
src/tm_mad/ssh/mv \
src/tm_mad/ssh/context \
src/tm_mad/ssh/premigrate \
src/tm_mad/ssh/postmigrate \
2015-06-04 22:43:32 +03:00
src/tm_mad/ssh/failmigrate \
2013-03-07 19:48:23 +04:00
src/tm_mad/ssh/mvds \
2015-06-08 16:00:32 +03:00
src/tm_mad/ssh/snap_create \
2015-08-12 13:31:58 +03:00
src/tm_mad/ssh/snap_create_live \
2015-06-08 16:00:32 +03:00
src/tm_mad/ssh/snap_delete \
src/tm_mad/ssh/snap_revert \
2015-11-04 19:31:56 +03:00
src/tm_mad/ssh/monitor \
2016-06-01 11:09:14 +03:00
src/tm_mad/ssh/monitor_ds \
2016-11-23 21:45:40 +03:00
src/tm_mad/ssh/cpds \
2020-10-07 11:39:27 +03:00
src/tm_mad/ssh/resize \
2020-10-20 19:53:05 +03:00
src/tm_mad/ssh/ssh_utils.sh \
2022-09-09 12:46:44 +03:00
src/tm_mad/ssh/recovery_snap_create_live \
src/tm_mad/ssh/prebackup_live \
src/tm_mad/ssh/prebackup \
src/tm_mad/ssh/postbackup_live \
src/tm_mad/ssh/postbackup"
2012-02-29 01:52:48 +04:00
2020-10-06 10:33:58 +03:00
TM_SSH_ETC_FILES="src/tm_mad/ssh/sshrc"
2012-02-29 01:52:48 +04:00
TM_DUMMY_FILES="src/tm_mad/dummy/clone \
2012-09-24 18:18:17 +04:00
src/tm_mad/dummy/delete \
src/tm_mad/dummy/ln \
src/tm_mad/dummy/mkswap \
src/tm_mad/dummy/mkimage \
src/tm_mad/dummy/mv \
src/tm_mad/dummy/context \
src/tm_mad/dummy/premigrate \
src/tm_mad/dummy/postmigrate \
2015-06-04 22:43:32 +03:00
src/tm_mad/dummy/failmigrate \
2013-03-07 19:48:23 +04:00
src/tm_mad/dummy/mvds \
2015-06-08 16:00:32 +03:00
src/tm_mad/dummy/snap_create \
2015-08-12 13:31:58 +03:00
src/tm_mad/dummy/snap_create_live \
2015-06-08 16:00:32 +03:00
src/tm_mad/dummy/snap_delete \
src/tm_mad/dummy/snap_revert \
2016-08-09 03:28:55 +03:00
src/tm_mad/dummy/monitor \
2016-11-24 20:03:42 +03:00
src/tm_mad/dummy/cpds \
src/tm_mad/dummy/resize"
2012-02-29 01:52:48 +04:00
2013-02-14 21:55:37 +04:00
TM_CEPH_FILES="src/tm_mad/ceph/clone \
2019-08-07 12:37:39 +03:00
src/tm_mad/ceph/clone.ssh \
2013-02-14 21:55:37 +04:00
src/tm_mad/ceph/ln \
2019-08-07 12:37:39 +03:00
src/tm_mad/ceph/ln.ssh \
2013-02-14 21:55:37 +04:00
src/tm_mad/ceph/mv \
src/tm_mad/ceph/mvds \
2019-08-07 12:37:39 +03:00
src/tm_mad/ceph/mvds.ssh \
2013-03-07 19:48:23 +04:00
src/tm_mad/ceph/cpds \
2019-08-07 12:37:39 +03:00
src/tm_mad/ceph/cpds.ssh \
2013-02-14 21:55:37 +04:00
src/tm_mad/ceph/premigrate \
src/tm_mad/ceph/postmigrate \
2015-05-28 18:19:48 +03:00
src/tm_mad/ceph/snap_create \
2015-08-12 13:31:58 +03:00
src/tm_mad/ceph/snap_create_live \
2015-05-28 18:19:48 +03:00
src/tm_mad/ceph/snap_delete \
src/tm_mad/ceph/snap_revert \
2015-06-04 22:43:32 +03:00
src/tm_mad/ceph/failmigrate \
2015-10-29 18:34:38 +03:00
src/tm_mad/ceph/delete \
2019-08-07 12:37:39 +03:00
src/tm_mad/ceph/delete.ssh \
2015-10-29 18:34:38 +03:00
src/tm_mad/ceph/context \
src/tm_mad/ceph/mkimage \
2015-11-04 19:31:56 +03:00
src/tm_mad/ceph/monitor \
2016-11-24 20:03:42 +03:00
src/tm_mad/ceph/mkswap \
2019-08-07 12:37:39 +03:00
src/tm_mad/ceph/resize \
2022-09-09 12:46:44 +03:00
src/tm_mad/ceph/resize.ssh \
src/tm_mad/ceph/prebackup_live \
src/tm_mad/ceph/prebackup \
src/tm_mad/ceph/postbackup_live \
src/tm_mad/ceph/postbackup"
2013-02-14 21:55:37 +04:00
2014-06-17 21:31:53 +04:00
TM_DEV_FILES="src/tm_mad/dev/clone \
src/tm_mad/dev/ln \
src/tm_mad/dev/mv \
src/tm_mad/dev/mvds \
src/tm_mad/dev/cpds \
src/tm_mad/dev/premigrate \
src/tm_mad/dev/postmigrate \
2015-06-08 16:00:32 +03:00
src/tm_mad/dev/snap_create \
2015-08-12 13:31:58 +03:00
src/tm_mad/dev/snap_create_live \
2015-06-08 16:00:32 +03:00
src/tm_mad/dev/snap_delete \
src/tm_mad/dev/snap_revert \
2015-06-04 22:43:32 +03:00
src/tm_mad/dev/failmigrate \
2016-11-24 20:03:42 +03:00
src/tm_mad/dev/delete \
src/tm_mad/dev/resize"
2014-06-17 21:31:53 +04:00
2016-01-08 15:18:22 +03:00
TM_VCENTER_FILES="src/tm_mad/vcenter/clone \
src/tm_mad/vcenter/ln \
src/tm_mad/vcenter/mv \
src/tm_mad/vcenter/mvds \
src/tm_mad/vcenter/cpds \
src/tm_mad/vcenter/premigrate \
src/tm_mad/vcenter/postmigrate \
2017-06-28 15:43:38 +03:00
src/tm_mad/vcenter/resize \
2016-01-08 15:18:22 +03:00
src/tm_mad/vcenter/snap_create \
src/tm_mad/vcenter/snap_create_live \
src/tm_mad/vcenter/snap_delete \
src/tm_mad/vcenter/snap_revert \
src/tm_mad/vcenter/failmigrate \
2017-03-07 14:39:56 +03:00
src/tm_mad/vcenter/context \
src/tm_mad/vcenter/monitor \
src/tm_mad/vcenter/mkimage \
src/tm_mad/vcenter/mkswap \
2016-01-08 15:18:22 +03:00
src/tm_mad/vcenter/delete"
2016-05-09 13:09:07 +03:00
TM_ISCSI_FILES="src/tm_mad/iscsi_libvirt/clone \
src/tm_mad/iscsi_libvirt/ln \
src/tm_mad/iscsi_libvirt/mv \
src/tm_mad/iscsi_libvirt/mvds \
src/tm_mad/iscsi_libvirt/cpds \
src/tm_mad/iscsi_libvirt/premigrate \
src/tm_mad/iscsi_libvirt/postmigrate \
src/tm_mad/iscsi_libvirt/snap_create \
src/tm_mad/iscsi_libvirt/snap_create_live \
src/tm_mad/iscsi_libvirt/snap_delete \
src/tm_mad/iscsi_libvirt/snap_revert \
src/tm_mad/iscsi_libvirt/failmigrate \
2016-11-24 20:03:42 +03:00
src/tm_mad/iscsi_libvirt/delete \
src/tm_mad/iscsi_libvirt/resize"
2015-12-22 12:03:58 +03:00
2011-03-22 20:21:09 +03:00
#-------------------------------------------------------------------------------
2012-02-19 05:08:03 +04:00
# Datastore drivers, to be installed under $REMOTES_LOCATION/datastore
2012-04-24 18:34:48 +04:00
# - Dummy Image Repository, $REMOTES_LOCATION/datastore/dummy
2012-02-19 05:08:03 +04:00
# - FS based Image Repository, $REMOTES_LOCATION/datastore/fs
2011-03-22 20:21:09 +03:00
#-------------------------------------------------------------------------------
2011-12-19 15:48:37 +04:00
2012-02-19 05:08:03 +04:00
DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
2012-06-21 02:46:57 +04:00
src/datastore_mad/remotes/downloader.sh \
2018-12-11 14:21:43 +03:00
src/datastore_mad/remotes/lxd_downloader.sh \
2020-03-23 12:45:21 +03:00
src/datastore_mad/remotes/docker_downloader.sh \
2023-02-07 12:39:48 +03:00
src/datastore_mad/remotes/restic_downloader.rb \
2023-02-21 19:19:01 +03:00
src/datastore_mad/remotes/rsync_downloader.rb \
2016-02-10 14:25:28 +03:00
src/datastore_mad/remotes/vcenter_uploader.rb \
2016-03-08 19:22:34 +03:00
src/datastore_mad/remotes/vcenter_downloader.rb \
2016-02-02 17:27:58 +03:00
src/datastore_mad/remotes/url.rb \
2012-02-19 05:08:03 +04:00
src/datastore_mad/remotes/libfs.sh"
2011-03-22 20:21:09 +03:00
2012-03-14 15:20:06 +04:00
DATASTORE_DRIVER_DUMMY_SCRIPTS="src/datastore_mad/remotes/dummy/cp \
src/datastore_mad/remotes/dummy/mkfs \
2012-06-01 15:55:01 +04:00
src/datastore_mad/remotes/dummy/stat \
2012-06-13 02:56:27 +04:00
src/datastore_mad/remotes/dummy/clone \
2013-07-10 20:32:41 +04:00
src/datastore_mad/remotes/dummy/monitor \
2015-06-08 13:45:16 +03:00
src/datastore_mad/remotes/dummy/snap_delete \
src/datastore_mad/remotes/dummy/snap_revert \
src/datastore_mad/remotes/dummy/snap_flatten \
2015-12-17 14:39:27 +03:00
src/datastore_mad/remotes/dummy/rm \
2022-11-16 11:27:03 +03:00
src/datastore_mad/remotes/dummy/restore \
2015-12-17 14:39:27 +03:00
src/datastore_mad/remotes/dummy/export"
2012-03-14 15:20:06 +04:00
2012-02-19 05:08:03 +04:00
DATASTORE_DRIVER_FS_SCRIPTS = " src/datastore_mad/remotes/fs/cp \
src/datastore_mad/remotes/fs/mkfs \
2012-06-01 15:55:01 +04:00
src/datastore_mad/remotes/fs/stat \
2012-06-13 02:56:27 +04:00
src/datastore_mad/remotes/fs/clone \
2013-06-27 04:30:49 +04:00
src/datastore_mad/remotes/fs/monitor \
2015-06-08 13:45:16 +03:00
src/datastore_mad/remotes/fs/snap_delete \
src/datastore_mad/remotes/fs/snap_revert \
src/datastore_mad/remotes/fs/snap_flatten \
2015-12-17 14:39:27 +03:00
src/datastore_mad/remotes/fs/rm \
src/datastore_mad/remotes/fs/export"
2011-06-07 21:03:32 +04:00
2019-04-09 17:11:15 +03:00
DATASTORE_DRIVER_ETC_FS_SCRIPTS = "src/datastore_mad/remotes/fs/fs.conf"
2013-02-14 21:55:37 +04:00
DATASTORE_DRIVER_CEPH_SCRIPTS = " src/datastore_mad/remotes/ceph/cp \
src/datastore_mad/remotes/ceph/mkfs \
src/datastore_mad/remotes/ceph/stat \
src/datastore_mad/remotes/ceph/rm \
2013-07-09 21:09:08 +04:00
src/datastore_mad/remotes/ceph/monitor \
2013-02-14 21:55:37 +04:00
src/datastore_mad/remotes/ceph/clone \
2015-06-08 13:45:16 +03:00
src/datastore_mad/remotes/ceph/snap_delete \
src/datastore_mad/remotes/ceph/snap_revert \
src/datastore_mad/remotes/ceph/snap_flatten \
2016-02-10 13:45:15 +03:00
src/datastore_mad/remotes/ceph/ceph_utils.sh \
src/datastore_mad/remotes/ceph/export"
2013-02-14 21:55:37 +04:00
2018-01-18 14:27:38 +03:00
DATASTORE_DRIVER_ETC_CEPH_SCRIPTS = "src/datastore_mad/remotes/ceph/ceph.conf"
2014-06-17 21:31:53 +04:00
DATASTORE_DRIVER_DEV_SCRIPTS = " src/datastore_mad/remotes/dev/cp \
src/datastore_mad/remotes/dev/mkfs \
src/datastore_mad/remotes/dev/stat \
src/datastore_mad/remotes/dev/rm \
src/datastore_mad/remotes/dev/monitor \
2015-06-08 13:45:16 +03:00
src/datastore_mad/remotes/dev/snap_delete \
src/datastore_mad/remotes/dev/snap_revert \
src/datastore_mad/remotes/dev/snap_flatten \
2014-06-17 21:31:53 +04:00
src/datastore_mad/remotes/dev/clone"
2016-01-07 20:59:38 +03:00
DATASTORE_DRIVER_VCENTER_SCRIPTS = " src/datastore_mad/remotes/vcenter/cp \
src/datastore_mad/remotes/vcenter/mkfs \
src/datastore_mad/remotes/vcenter/stat \
src/datastore_mad/remotes/vcenter/rm \
src/datastore_mad/remotes/vcenter/monitor \
src/datastore_mad/remotes/vcenter/snap_delete \
src/datastore_mad/remotes/vcenter/snap_revert \
src/datastore_mad/remotes/vcenter/snap_flatten \
2016-03-08 19:22:34 +03:00
src/datastore_mad/remotes/vcenter/clone \
src/datastore_mad/remotes/vcenter/export"
2016-01-07 20:59:38 +03:00
2016-05-09 13:09:07 +03:00
DATASTORE_DRIVER_ISCSI_SCRIPTS = " src/datastore_mad/remotes/iscsi_libvirt/cp \
src/datastore_mad/remotes/iscsi_libvirt/mkfs \
src/datastore_mad/remotes/iscsi_libvirt/stat \
src/datastore_mad/remotes/iscsi_libvirt/rm \
src/datastore_mad/remotes/iscsi_libvirt/monitor \
src/datastore_mad/remotes/iscsi_libvirt/snap_delete \
src/datastore_mad/remotes/iscsi_libvirt/snap_revert \
src/datastore_mad/remotes/iscsi_libvirt/snap_flatten \
src/datastore_mad/remotes/iscsi_libvirt/clone"
2015-12-22 12:03:58 +03:00
F #5516: New backup interface for OpenNebula
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Neal Hansen <nhansen@opennebula.io>
co-authored-by: Daniel Clavijo Coca <dclavijo@opennebula.io>
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
BACKUP INTERFACE
=================
* Backups are exposed through special Datastore (BACKUP_DS) and
  Image (BACKUP) types. These new types can only be used for backing
  up VMs. This approach makes it possible to:
  - Implement tier-based backup policies (backups made in different
    locations).
  - Leverage the access control and quota systems.
  - Support different storage and backup technologies.
* Backup interface for the VMs:
  - A VM configures backups with BACKUP_CONFIG. This attribute can be set
    in the VM template or updated with the updateconf API call (see the
    example snippet after this section). It can include:
    + BACKUP_VOLATILE: whether or not to back up volatile disks.
    + FS_FREEZE: how the FS is frozen for running VMs (qemu-agent,
      suspend or none). When possible, backups are crash consistent.
    + KEEP_LAST: keep only a given number of backups.
  - Backups are initiated by the one.vm.backup API call, which requires
    the target Datastore to perform the backup (one-shot). This is
    exposed by the onevm backup command.
  - Backups can be made periodic through scheduled actions.
  - Backup configuration is updated with the one.vm.updateconf API call.
* Restore interface:
  - Restores are initiated by the one.image.restore API call. This is
    exposed by the oneimage restore command.
  - Restores include configurable options for the VM template:
    + NO_IP: do not preserve IP addresses (but keep the NICs and network
      mapping).
    + NO_NIC: do not preserve network mappings.
  - Other template attributes:
    + Clean PCI devices, including network configuration in the case of
      TYPE=NIC attributes. By default it removes SHORT_ADDRESS and leaves
      the "auto" selection attributes.
    + Clean NUMA_NODE: removes node id and cpu sets, keeping the NUMA node.
  - It is possible to restore single files stored in the repository by
    using the backup-specific URL.
* Sunstone (Ruby version) has been updated to expose these features.
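As a reference, a minimal BACKUP_CONFIG snippet for a VM template could look
like the following (attribute names are taken from the list above; the exact
value formats are an assumption of this sketch):

    BACKUP_CONFIG = [
      BACKUP_VOLATILE = "NO",
      FS_FREEZE       = "NONE",
      KEEP_LAST       = "3"
    ]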
BACKUP DRIVERS & IMPLEMENTATION
===============================
* The backup operation is implemented by a combination of 3 driver operations:
  - VMM. New internal (oned <-> one_vmm_exec.rb) operation to orchestrate
    backups for RUNNING VMs.
  - TM. This commit introduces 2 new operations (and their
    corresponding _live variants):
    + pre_backup(_live): prepares the disks to be backed up in the
      repository. It is specific to the driver: (i) ceph uses the export
      operation; (ii) qcow2/raw uses snapshot-create-as and fs_freeze as
      needed.
    + post_backup(_live): performs cleaning operations, e.g. removing KVM
      snapshots or tmp dirs.
  - DATASTORE. Each backup technology is represented by its
    corresponding driver, which needs to implement (an example driver
    layout follows this section):
    + backup: takes the VM disks in file (qcow2) format and stores them
      in the backup repository.
    + restore: takes a backup image and restores the associated disks
      and VM template.
    + monitor: gathers the available space in the repository.
    + rm: removes existing backups.
    + stat: returns the "restored" size of a disk stored in a backup.
    + downloader pseudo-URL handler, in the form
      <backup_proto>://<driver_snapshot_id>/<disk filename>
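As an illustration only, a hypothetical backup datastore driver "mybackup"
would provide the actions listed above as scripts under its remotes
directory, mirroring the driver file lists in this install.sh:

    src/datastore_mad/remotes/mybackup/backup
    src/datastore_mad/remotes/mybackup/restore
    src/datastore_mad/remotes/mybackup/monitor
    src/datastore_mad/remotes/mybackup/rm
    src/datastore_mad/remotes/mybackup/stat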
BACKUP MANAGEMENT
=================
Backup actions may potentially take some time, leaving some vmm_exec threads in
use for a long time and blocking other VMM operations. Backups are planned
by the scheduler through the sched action interface.
Two attributes have been added to sched.conf (see the example after this
section):
* MAX_BACKUPS: maximum number of active backup operations in the cloud. No
  more backups will be started beyond this limit.
* MAX_BACKUPS_HOST: maximum number of backups per host.
* Fix onevm CLI to properly show and manage scheduled actions. --schedule
  now accepts "now", as well as relative times (+<seconds_from_stime>), e.g.:
      onevm backup --schedule now -d 100 63
* Backup is added to VM_ADMIN_ACTIONS in oned.conf. Regular users need
  to use the batch interface or request specific permissions.
Internal restructuring of the Scheduler:
- All the sched_actions interface is now in the SchedActionsXML class and files.
  This class uses references to the VM XML, and MUST be used in the same
  lifetime scope.
- XMLRPC API calls for sched actions have been moved to ScheduledActionXML.cc
  as static functions.
- VirtualMachineActionPool includes counters for active backups (total
  and per host).
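For reference, a hedged sched.conf snippet using these attributes (the values
below are arbitrary examples, not defaults):

    MAX_BACKUPS      = 5
    MAX_BACKUPS_HOST = 2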
SUPPORTED PLATFORMS
====================
* hypervisor: KVM
* TM: qcow2/shared/ssh, ceph
* backup: restic, rsync
Notes on Ceph
* Ceph backups are performed in the following steps (a hedged command-level
  sketch follows this section):
  1. A snapshot of each disk is taken (group snapshots cannot be used, as
     it seems we cannot export the disks afterwards).
  2. Disks are exported to a file.
  3. The file is converted to qcow2 format.
  4. Disk files are uploaded to the backup repo.
TODO:
* Confirm crash-consistent snapshots cannot be used in Ceph.
TODO:
* Check if using the VM dir instead of the full path is better to accommodate
  DS migrations, i.e.:
  - Current path: /var/lib/one/datastores/100/53/backup/disk.0
  - Proposal: 53/backup/disk.0
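A command-level sketch of the Ceph steps above, assuming a disk image
one/one-53-0 in pool "one" (pool, image and file names are placeholders):

    rbd snap create one/one-53-0@backup
    rbd export one/one-53-0@backup /var/tmp/disk.0.raw
    qemu-img convert -O qcow2 /var/tmp/disk.0.raw /var/tmp/disk.0.qcow2
    # the resulting qcow2 file is then uploaded to the backup repository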
RESTIC DRIVER
=============
Developed together with this feature; it is part of the EE edition.
* It supports the SFTP protocol. The following attributes are
  supported (an illustrative datastore template follows this section):
  - RESTIC_SFTP_SERVER
  - RESTIC_SFTP_USER: only if different from oneadmin
  - RESTIC_PASSWORD
  - RESTIC_IONICE: run restic under a given ionice priority (class 2)
  - RESTIC_NICE: run restic under a given nice value
  - RESTIC_BWLIMIT: limit restic upload/download bandwidth
  - RESTIC_COMPRESSION: Restic 0.14 implements compression (three modes:
    off, auto, max). This requires repository version 2. By default,
    auto is used (average compression without too much CPU usage).
  - RESTIC_CONNECTIONS: sets the number of concurrent connections to a
    backend (5 by default). For high-latency backends this number can be
    increased.
* downloader URL: restic://<datastore_id>/<snapshot_id>/<file_name>,
  where snapshot_id is the restic snapshot hash. It can be used to recover
  single disk images from a backup. These URLs support:
  - RESTIC_CONNECTIONS
  - RESTIC_BWLIMIT
  - RESTIC_IONICE
  - RESTIC_NICE
  These options need to be defined in the associated datastore.
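An illustrative (not authoritative) backup datastore template using these
attributes; the DS_MAD value and the server name are assumptions of this
example:

    NAME    = "restic-backups"
    TYPE    = "BACKUP_DS"
    DS_MAD  = "restic"
    RESTIC_SFTP_SERVER = "backups.example.com"
    RESTIC_PASSWORD    = "<repository password>"
    RESTIC_COMPRESSION = "auto"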
RSYNC DRIVER
=============
An rsync driver is included as part of the CE distribution. It uses the
rsync tool to store backups on a remote server through SSH:
* The following attributes are supported to configure the backup
  datastore (an illustrative template follows this section):
  - RSYNC_HOST
  - RSYNC_USER
  - RSYNC_ARGS: arguments to perform the rsync operation (-aS by default)
* downloader URL: rsync://<ds_id>/<vmid>/<hash>/<file> can be used to recover
  single files from an existing backup (RSYNC_HOST and RSYNC_USER need
  to be set in the datastore ds_id).
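A similarly hedged rsync backup datastore template (the DS_MAD value, host
and user are assumptions of this example):

    NAME       = "rsync-backups"
    TYPE       = "BACKUP_DS"
    DS_MAD     = "rsync"
    RSYNC_HOST = "backups.example.com"
    RSYNC_USER = "oneadmin"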
EMULATOR_CPUS
=============
This commit includes a feature not related to backups:
* Add EMULATOR_CPUS (KVM). This host (or cluster) attribute defines the
  CPU IDs where the emulator threads will be pinned (see the example below).
  If this value is not defined, the allocated CPUs will be used when using
  a PIN policy.
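For example, to pin emulator threads to host CPUs 0 and 1 (illustrative
value only):

    EMULATOR_CPUS = "0,1"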
(cherry picked from commit a9e6a8e000e9a5a2f56f80ce622ad9ffc9fa032b)
F OpenNebula/one#5516: adding rsync backup driver
(cherry picked from commit fb52edf5d009dc02b071063afb97c6519b9e8305)
F OpenNebula/one#5516: update install.sh, add vmid to source, some polish
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 6fc6f8a67e435f7f92d5c40fdc3d1c825ab5581d)
F OpenNebula/one#5516: cleanup
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 12f4333b833f23098142cd4762eb9e6c505e1340)
F OpenNebula/one#5516: update downloader, default args, size check
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 510124ef2780a4e2e8c3d128c9a42945be38a305)
(cherry picked from commit d4fcd134dc293f2b862086936db4d552792539fa)
2022-09-09 12:46:44 +03:00
DATASTORE_DRIVER_RSYNC_SCRIPTS = " src/datastore_mad/remotes/rsync/cp \
src/datastore_mad/remotes/rsync/mkfs \
src/datastore_mad/remotes/rsync/stat \
src/datastore_mad/remotes/rsync/clone \
src/datastore_mad/remotes/rsync/monitor \
src/datastore_mad/remotes/rsync/snap_delete \
src/datastore_mad/remotes/rsync/snap_revert \
src/datastore_mad/remotes/rsync/snap_flatten \
src/datastore_mad/remotes/rsync/rm \
src/datastore_mad/remotes/rsync/backup \
2023-03-14 13:02:05 +03:00
src/datastore_mad/remotes/rsync/backup_cancel \
2022-09-09 12:46:44 +03:00
src/datastore_mad/remotes/rsync/restore \
2023-02-21 19:19:01 +03:00
src/datastore_mad/remotes/rsync/export \
src/datastore_mad/remotes/rsync/increment_flatten"
2022-09-09 12:46:44 +03:00
2020-09-25 13:08:42 +03:00
DATASTORE_DRIVER_ETC_SCRIPTS = "src/datastore_mad/remotes/datastore.conf"
2015-12-17 14:39:27 +03:00
#-------------------------------------------------------------------------------
# Marketplace drivers, to be installed under $REMOTES_LOCATION/market
# - HTTP based marketplace, $REMOTES_LOCATION/market/http
2015-12-24 20:49:21 +03:00
# - OpenNebula public marketplace, $REMOTES_LOCATION/market/one
2015-12-20 00:35:09 +03:00
# - S3 object-based marketplace, $REMOTES_LOCATION/market/s3
2018-12-11 14:21:43 +03:00
# - Linuxcontainers.org marketplace, $REMOTES_LOCATION/market/linuxcontainers
2015-12-17 14:39:27 +03:00
#-------------------------------------------------------------------------------
MARKETPLACE_DRIVER_HTTP_SCRIPTS = " src/market_mad/remotes/http/import \
src/market_mad/remotes/http/delete \
src/market_mad/remotes/http/monitor"
2019-04-09 17:11:15 +03:00
MARKETPLACE_DRIVER_ETC_HTTP_SCRIPTS = "src/market_mad/remotes/http/http.conf"
2015-12-24 20:49:21 +03:00
MARKETPLACE_DRIVER_ONE_SCRIPTS = " src/market_mad/remotes/one/import \
src/market_mad/remotes/one/delete \
src/market_mad/remotes/one/monitor"
2016-01-25 18:42:59 +03:00
MARKETPLACE_DRIVER_S3_SCRIPTS = " src/market_mad/remotes/s3/import \
src/market_mad/remotes/s3/delete \
src/market_mad/remotes/s3/monitor \
src/market_mad/remotes/s3/S3.rb"
2021-05-31 14:27:53 +03:00
MARKETPLACE_DRIVER_COMMON_SCRIPTS = " src/market_mad/remotes/common/lxd.rb \
src/market_mad/remotes/common/docker.rb"
2020-05-18 04:01:50 +03:00
2018-12-11 14:21:43 +03:00
MARKETPLACE_DRIVER_LXC_SCRIPTS = " src/market_mad/remotes/linuxcontainers/import \
src/market_mad/remotes/linuxcontainers/delete \
src/market_mad/remotes/linuxcontainers/monitor"
2020-02-07 12:42:58 +03:00
MARKETPLACE_DRIVER_TK_SCRIPTS = " src/market_mad/remotes/turnkeylinux/import \
src/market_mad/remotes/turnkeylinux/delete \
src/market_mad/remotes/turnkeylinux/monitor"
2020-05-10 21:14:20 +03:00
MARKETPLACE_DRIVER_DH_SCRIPTS = " src/market_mad/remotes/dockerhub/import \
src/market_mad/remotes/dockerhub/delete \
src/market_mad/remotes/dockerhub/monitor"
2021-05-31 14:27:53 +03:00
MARKETPLACE_DRIVER_REGISTRY_SCRIPTS = " src/market_mad/remotes/docker_registry/import \
src/market_mad/remotes/docker_registry/delete \
src/market_mad/remotes/docker_registry/monitor"
2011-04-29 20:59:39 +04:00
#-------------------------------------------------------------------------------
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
#-------------------------------------------------------------------------------
2014-03-03 22:11:17 +04:00
ONEDB_FILES = " src/onedb/fsck.rb \
src/onedb/onedb.rb \
2016-01-21 20:38:05 +03:00
src/onedb/onedb_backend.rb \
2017-05-16 11:21:54 +03:00
src/onedb/vcenter_one54.rb \
2017-02-28 20:14:21 +03:00
src/onedb/sqlite2mysql.rb \
2017-05-09 17:25:36 +03:00
src/onedb/database_schema.rb \
2017-07-05 17:21:15 +03:00
src/onedb/fsck \
src/onedb/onedb_live.rb"
2014-03-03 22:11:17 +04:00
2015-10-08 17:14:27 +03:00
ONEDB_PATCH_FILES = " src/onedb/patches/4.14_monitoring.rb \
2016-04-26 00:48:00 +03:00
src/onedb/patches/history_times.rb"
2015-07-08 16:51:18 +03:00
2009-01-02 17:58:51 +03:00
#-------------------------------------------------------------------------------
# Configuration files for OpenNebula, to be installed under $ETC_LOCATION
#-------------------------------------------------------------------------------
ETC_FILES = " share/etc/oned.conf \
2011-07-12 16:00:02 +04:00
share/etc/defaultrc \
2022-06-27 20:35:24 +03:00
share/etc/guacd \
2015-05-20 11:55:52 +03:00
src/tm_mad/tmrc \
2020-03-04 18:05:57 +03:00
src/scheduler/etc/sched.conf \
src/monitor/etc/monitord.conf "
2009-01-02 17:58:51 +03:00
2013-10-16 19:38:03 +04:00
EC2_ETC_FILES = " src/vmm_mad/remotes/ec2/ec2_driver.conf \
src/vmm_mad/remotes/ec2/ec2_driver.default"
2011-11-29 19:37:01 +04:00
2014-06-19 19:03:18 +04:00
AZ_ETC_FILES = " src/vmm_mad/remotes/az/az_driver.conf \
src/vmm_mad/remotes/az/az_driver.default"
2020-04-30 17:06:21 +03:00
VCENTER_ETC_FILES = "src/vmm_mad/remotes/lib/vcenter_driver/vcenter_driver.default"
2017-04-26 09:58:25 +03:00
2009-01-02 17:58:51 +03:00
#-------------------------------------------------------------------------------
# Virtualization drivers config. files, to be installed under $ETC_LOCATION
2011-06-01 20:57:47 +04:00
# - ssh, $ETC_LOCATION/vmm_exec
2009-01-02 17:58:51 +03:00
#-------------------------------------------------------------------------------
2011-06-01 20:57:47 +04:00
VMM_EXEC_ETC_FILES = " src/vmm_mad/exec/vmm_execrc \
2019-11-05 17:54:33 +03:00
src/vmm_mad/exec/vmm_exec_kvm.conf"
2010-08-24 18:44:42 +04:00
2009-04-04 03:34:33 +04:00
#-------------------------------------------------------------------------------
# Hook Manager driver config. files, to be installed under $ETC_LOCATION/hm
#-------------------------------------------------------------------------------
HM_ETC_FILES = "src/hm_mad/hmrc"
2010-07-09 20:59:42 +04:00
#-------------------------------------------------------------------------------
2011-08-26 18:51:13 +04:00
# Auth Manager drivers config. files, to be installed under $ETC_LOCATION/auth
2010-07-09 20:59:42 +04:00
#-------------------------------------------------------------------------------
2011-10-24 19:24:42 +04:00
AUTH_ETC_FILES = " src/authm_mad/remotes/server_x509/server_x509_auth.conf \
2011-11-29 20:02:43 +04:00
src/authm_mad/remotes/ldap/ldap_auth.conf \
2011-08-30 20:04:53 +04:00
src/authm_mad/remotes/x509/x509_auth.conf"
2010-07-09 20:59:42 +04:00
2009-01-02 17:58:51 +03:00
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples
#-------------------------------------------------------------------------------
EXAMPLE_SHARE_FILES = " share/examples/vm.template \
share/examples/private.net \
share/examples/public.net"
2023-09-14 13:11:14 +03:00
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples/external_scheduler
#-------------------------------------------------------------------------------
EXAMPLE_EXTERNAL_SCHED_FILES = "share/examples/external_scheduler/external_scheduler_server.rb"
2019-09-23 16:16:05 +03:00
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples/host_hooks
#-------------------------------------------------------------------------------
EXAMPLE_HOST_HOOKS_SHARE_FILES = "share/examples/host_hooks/error_hook"
2019-10-02 17:49:32 +03:00
#-------------------------------------------------------------------------------
# LXD network issues vnm hook patches
#-------------------------------------------------------------------------------
LXD_NETWORK_HOOKS = "share/examples/network_hooks/99-lxd_clean.rb"
2013-04-26 14:29:06 +04:00
#-------------------------------------------------------------------------------
# Files required to interact with the websockify server
#-------------------------------------------------------------------------------
2019-01-18 17:48:10 +03:00
WEBSOCKIFY_SHARE_RUN_FILES = "share/websockify/run"
WEBSOCKIFY_SHARE_MODULE_FILES = " share/websockify/websockify/__init__.py \
share/websockify/websockify/auth_plugins.py \
share/websockify/websockify/token_plugins.py \
share/websockify/websockify/websocket.py \
share/websockify/websockify/websocketproxy.py"
2013-04-26 14:29:06 +04:00
2021-07-22 17:29:26 +03:00
#-------------------------------------------------------------------------------
# HOOK scripts, to be installed under $VAR_LOCATION/remotes/hooks/autostart
#-------------------------------------------------------------------------------
HOOK_AUTOSTART_FILES = " share/hooks/autostart/host \
share/hooks/autostart/vm"
2009-07-21 14:45:54 +04:00
#-------------------------------------------------------------------------------
2017-05-18 17:40:52 +03:00
# HOOK scripts, to be installed under $VAR_LOCATION/remotes/hooks/ft
2011-06-07 21:03:32 +04:00
#-------------------------------------------------------------------------------
2017-05-18 17:40:52 +03:00
HOOK_FT_FILES = " share/hooks/ft/host_error.rb \
2017-06-26 09:05:50 +03:00
share/hooks/ft/fence_host.sh"
2017-05-18 17:41:18 +03:00
#-------------------------------------------------------------------------------
# HOOK RAFT scripts, to be installed under $VAR_LOCATION/remotes/hooks/raft
#-------------------------------------------------------------------------------
2017-05-29 18:49:30 +03:00
HOOK_RAFT_FILES = "share/hooks/raft/vip.sh"
2011-06-07 21:03:32 +04:00
#-------------------------------------------------------------------------------
2011-11-15 15:44:50 +04:00
# Installation scripts, to be installed under $SHARE_LOCATION
2009-07-21 14:45:54 +04:00
#-------------------------------------------------------------------------------
2016-09-12 20:15:30 +03:00
INSTALL_GEMS_SHARE_FILES = " share/install_gems/install_gems \
share/install_gems/Gemfile"
2017-06-23 00:45:21 +03:00
2016-09-01 18:34:56 +03:00
ONETOKEN_SHARE_FILE = "share/onetoken/onetoken.sh"
2011-05-13 20:22:14 +04:00
2017-06-23 00:45:21 +03:00
FOLLOWER_CLEANUP_SHARE_FILE = "share/hooks/raft/follower_cleanup"
2022-06-16 19:57:07 +03:00
PRE_CLEANUP_SHARE_FILE = "share/pkgs/services/systemd/pre_cleanup"
2020-04-01 16:22:32 +03:00
#-------------------------------------------------------------------------------
# Start script files, to be installed under $SHARE_LOCATION/start-scripts
#-------------------------------------------------------------------------------
START_SCRIPT_SHARE_FILES = " share/start-scripts/map_vnets_start_script \
share/start-scripts/cron_start_script"
2011-06-13 18:08:07 +04:00
#-------------------------------------------------------------------------------
# OCA Files
#-------------------------------------------------------------------------------
2012-12-18 18:52:11 +04:00
OCA_LIB_FILES = "src/oca/ruby/opennebula.rb"
2013-07-10 15:29:53 +04:00
RUBY_OPENNEBULA_LIB_FILES = " src/oca/ruby/opennebula/acl_pool.rb \
src/oca/ruby/opennebula/acl.rb \
src/oca/ruby/opennebula/client.rb \
src/oca/ruby/opennebula/cluster_pool.rb \
src/oca/ruby/opennebula/cluster.rb \
src/oca/ruby/opennebula/datastore_pool.rb \
src/oca/ruby/opennebula/datastore.rb \
src/oca/ruby/opennebula/document_json.rb \
src/oca/ruby/opennebula/document_pool_json.rb \
src/oca/ruby/opennebula/document_pool.rb \
src/oca/ruby/opennebula/document.rb \
src/oca/ruby/opennebula/error.rb \
src/oca/ruby/opennebula/group_pool.rb \
src/oca/ruby/opennebula/group.rb \
src/oca/ruby/opennebula/host_pool.rb \
src/oca/ruby/opennebula/host.rb \
src/oca/ruby/opennebula/image_pool.rb \
src/oca/ruby/opennebula/image.rb \
2020-10-28 17:11:34 +03:00
src/oca/ruby/opennebula/lockable_ext.rb \
2021-02-08 18:10:34 +03:00
src/oca/ruby/opennebula/wait_ext.rb \
2014-12-19 19:30:00 +03:00
src/oca/ruby/opennebula/oneflow_client.rb \
2013-07-10 15:29:53 +04:00
src/oca/ruby/opennebula/pool_element.rb \
src/oca/ruby/opennebula/pool.rb \
2014-09-08 13:59:13 +04:00
src/oca/ruby/opennebula/security_group_pool.rb \
src/oca/ruby/opennebula/security_group.rb \
2017-01-03 03:22:10 +03:00
src/oca/ruby/opennebula/vm_group_pool.rb \
src/oca/ruby/opennebula/vm_group.rb \
2013-07-10 15:29:53 +04:00
src/oca/ruby/opennebula/system.rb \
src/oca/ruby/opennebula/template_pool.rb \
src/oca/ruby/opennebula/template.rb \
2020-09-25 11:15:19 +03:00
src/oca/ruby/opennebula/template_ext.rb \
2013-07-10 15:29:53 +04:00
src/oca/ruby/opennebula/user_pool.rb \
src/oca/ruby/opennebula/user.rb \
2014-12-19 19:30:00 +03:00
src/oca/ruby/opennebula/vdc_pool.rb \
src/oca/ruby/opennebula/vdc.rb \
2013-07-10 15:29:53 +04:00
src/oca/ruby/opennebula/virtual_machine.rb \
2020-09-25 11:15:19 +03:00
src/oca/ruby/opennebula/virtual_machine_ext.rb \
2017-02-20 17:42:45 +03:00
src/oca/ruby/opennebula/virtual_machine_pool.rb \
2013-07-10 15:29:53 +04:00
src/oca/ruby/opennebula/virtual_network_pool.rb \
src/oca/ruby/opennebula/virtual_network.rb \
src/oca/ruby/opennebula/xml_element.rb \
src/oca/ruby/opennebula/xml_pool.rb \
2013-07-11 18:01:01 +04:00
src/oca/ruby/opennebula/xml_utils.rb \
2014-12-19 19:30:00 +03:00
src/oca/ruby/opennebula/zone_pool.rb \
2015-11-30 18:55:22 +03:00
src/oca/ruby/opennebula/zone.rb \
src/oca/ruby/opennebula/virtual_router_pool.rb \
2015-12-06 01:52:28 +03:00
src/oca/ruby/opennebula/virtual_router.rb \
src/oca/ruby/opennebula/marketplace_pool.rb \
2015-12-11 17:53:19 +03:00
src/oca/ruby/opennebula/marketplace.rb \
src/oca/ruby/opennebula/marketplaceapp_pool.rb \
2017-06-20 18:09:11 +03:00
src/oca/ruby/opennebula/marketplaceapp.rb \
2020-09-25 11:15:19 +03:00
src/oca/ruby/opennebula/marketplaceapp_ext.rb \
2018-11-21 18:20:29 +03:00
src/oca/ruby/opennebula/utils.rb \
src/oca/ruby/opennebula/vntemplate_pool.rb \
2019-09-09 15:43:51 +03:00
src/oca/ruby/opennebula/vntemplate.rb \
src/oca/ruby/opennebula/hook_pool.rb \
2019-09-19 16:26:20 +03:00
src/oca/ruby/opennebula/hook.rb \
2023-07-03 19:15:52 +03:00
src/oca/ruby/opennebula/backupjob_pool.rb \
src/oca/ruby/opennebula/backupjob.rb \
2020-09-25 11:15:19 +03:00
src/oca/ruby/opennebula/hook_log.rb \
src/oca/ruby/opennebula/flow.rb"
RUBY_OPENNEBULA_LIB_FLOW_FILES = " src/oca/ruby/opennebula/flow/grammar.rb \
src/oca/ruby/opennebula/flow/service_pool.rb \
src/oca/ruby/opennebula/flow/service_template_pool.rb \
src/oca/ruby/opennebula/flow/service_template.rb \
src/oca/ruby/opennebula/flow/service_template_ext.rb \
src/oca/ruby/opennebula/flow/validator.rb"
2011-06-13 18:08:07 +04:00
2009-07-21 18:56:35 +04:00
#-------------------------------------------------------------------------------
2009-10-12 03:06:46 +04:00
# Common Cloud Files
2009-07-21 18:56:35 +04:00
#-------------------------------------------------------------------------------
2009-10-12 03:06:46 +04:00
COMMON_CLOUD_LIB_FILES = " src/cloud/common/CloudServer.rb \
2009-10-21 21:00:59 +04:00
src/cloud/common/CloudClient.rb \
2011-09-24 22:34:38 +04:00
src/cloud/common/CloudAuth.rb"
2009-07-23 16:01:47 +04:00
2009-10-22 15:20:27 +04:00
COMMON_CLOUD_CLIENT_LIB_FILES = "src/cloud/common/CloudClient.rb"
2014-07-15 00:37:11 +04:00
CLOUD_AUTH_LIB_FILES = " src/cloud/common/CloudAuth/SunstoneCloudAuth.rb \
2012-03-21 19:28:58 +04:00
src/cloud/common/CloudAuth/X509CloudAuth.rb \
2016-04-14 10:41:11 +03:00
src/cloud/common/CloudAuth/RemoteCloudAuth.rb \
2013-06-27 13:36:08 +04:00
src/cloud/common/CloudAuth/OneGateCloudAuth.rb \
2012-03-21 19:28:58 +04:00
src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb"
2011-09-21 20:59:13 +04:00
2011-02-10 17:57:11 +03:00
#-----------------------------------------------------------------------------
# CLI files
#-----------------------------------------------------------------------------
2011-06-13 18:08:07 +04:00
CLI_LIB_FILES = " src/cli/cli_helper.rb \
src/cli/command_parser.rb \
src/cli/one_helper.rb"
ONE_CLI_LIB_FILES = " src/cli/one_helper/onegroup_helper.rb \
src/cli/one_helper/onehost_helper.rb \
src/cli/one_helper/oneimage_helper.rb \
src/cli/one_helper/onetemplate_helper.rb \
2012-06-11 19:26:01 +04:00
src/cli/one_helper/onequota_helper.rb \
2011-06-13 18:08:07 +04:00
src/cli/one_helper/oneuser_helper.rb \
src/cli/one_helper/onevm_helper.rb \
2011-06-30 13:17:22 +04:00
src/cli/one_helper/onevnet_helper.rb \
2012-02-09 21:55:18 +04:00
src/cli/one_helper/oneacl_helper.rb \
2012-02-24 18:53:53 +04:00
src/cli/one_helper/onedatastore_helper.rb \
2012-08-23 16:24:48 +04:00
src/cli/one_helper/onecluster_helper.rb \
2013-12-12 22:10:12 +04:00
src/cli/one_helper/onezone_helper.rb \
2014-12-19 19:30:00 +03:00
src/cli/one_helper/onevdc_helper.rb \
2014-09-08 13:59:13 +04:00
src/cli/one_helper/oneacct_helper.rb \
2015-11-30 18:55:22 +03:00
src/cli/one_helper/onesecgroup_helper.rb \
2017-01-03 03:22:10 +03:00
src/cli/one_helper/onevmgroup_helper.rb \
2015-12-06 01:52:28 +03:00
src/cli/one_helper/onevrouter_helper.rb \
2015-12-11 17:53:19 +03:00
src/cli/one_helper/onemarketapp_helper.rb \
2018-03-23 17:10:20 +03:00
src/cli/one_helper/onevcenter_helper.rb \
2018-11-21 18:20:29 +03:00
src/cli/one_helper/onemarket_helper.rb \
2019-09-09 15:43:51 +03:00
src/cli/one_helper/onevntemplate_helper.rb \
2020-02-19 16:55:20 +03:00
src/cli/one_helper/onehook_helper.rb \
2023-07-03 19:15:52 +03:00
src/cli/one_helper/onebackupjob_helper.rb \
2020-02-19 16:55:20 +03:00
src/cli/one_helper/oneflow_helper.rb \
src/cli/one_helper/oneflowtemplate_helper.rb"
2011-02-10 17:57:11 +03:00
CLI_BIN_FILES = " src/cli/onevm \
src/cli/onehost \
src/cli/onevnet \
src/cli/oneuser \
src/cli/oneimage \
2011-05-10 20:45:15 +04:00
src/cli/onetemplate \
2011-06-22 21:22:52 +04:00
src/cli/onegroup \
2012-02-09 21:55:18 +04:00
src/cli/oneacl \
2012-02-24 18:53:53 +04:00
src/cli/onedatastore \
2012-08-23 16:24:48 +04:00
src/cli/onecluster \
2013-12-12 22:10:12 +04:00
src/cli/onezone \
2013-07-10 15:29:53 +04:00
src/cli/oneflow \
src/cli/oneflow-template \
2014-09-08 13:59:13 +04:00
src/cli/oneacct \
2014-12-10 19:28:52 +03:00
src/cli/onesecgroup \
2017-01-03 03:22:10 +03:00
src/cli/onevmgroup \
2014-12-19 19:30:00 +03:00
src/cli/oneshowback \
2015-11-30 18:55:22 +03:00
src/cli/onevdc \
2015-12-06 01:52:28 +03:00
src/cli/onevrouter \
2015-12-11 17:53:19 +03:00
src/cli/onemarketapp \
2018-11-21 18:20:29 +03:00
src/cli/onemarket \
2019-09-09 15:43:51 +03:00
src/cli/onevntemplate \
2022-04-19 12:26:22 +03:00
src/cli/oneirb \
src/cli/onelog \
2023-07-03 19:15:52 +03:00
src/cli/onehook \
src/cli/onebackupjob"
2011-02-10 17:57:11 +03:00
2011-06-13 18:08:07 +04:00
CLI_CONF_FILES = " src/cli/etc/onegroup.yaml \
src/cli/etc/onehost.yaml \
src/cli/etc/oneimage.yaml \
src/cli/etc/onetemplate.yaml \
src/cli/etc/oneuser.yaml \
src/cli/etc/onevm.yaml \
2011-06-30 13:17:22 +04:00
src/cli/etc/onevnet.yaml \
2012-02-09 21:55:18 +04:00
src/cli/etc/oneacl.yaml \
2012-02-24 18:53:53 +04:00
src/cli/etc/onedatastore.yaml \
2012-06-01 18:30:27 +04:00
src/cli/etc/onecluster.yaml \
2013-12-12 22:10:12 +04:00
src/cli/etc/onezone.yaml \
2014-09-08 13:59:13 +04:00
src/cli/etc/oneacct.yaml \
2014-12-10 19:28:52 +03:00
src/cli/etc/onesecgroup.yaml \
2017-01-03 03:22:10 +03:00
src/cli/etc/onevmgroup.yaml \
2014-12-19 19:30:00 +03:00
src/cli/etc/oneshowback.yaml \
2015-11-30 18:55:22 +03:00
src/cli/etc/onevdc.yaml \
2015-12-06 01:52:28 +03:00
src/cli/etc/onevrouter.yaml \
2015-12-11 17:53:19 +03:00
src/cli/etc/onemarketapp.yaml \
2018-11-22 17:00:47 +03:00
src/cli/etc/onemarket.yaml \
2019-09-09 15:43:51 +03:00
src/cli/etc/onevntemplate.yaml \
2020-05-18 00:50:52 +03:00
src/cli/etc/onehook.yaml \
2023-07-03 19:15:52 +03:00
src/cli/etc/onebackupjob.yaml \
2020-05-18 00:50:52 +03:00
src/cli/etc/oneflow.yaml \
src/cli/etc/oneflowtemplate.yaml"
2011-06-13 18:08:07 +04:00
2018-11-29 17:14:17 +03:00
#-----------------------------------------------------------------------------
# Provision files
#-----------------------------------------------------------------------------
2020-10-13 14:38:19 +03:00
ONEPROVISION_BIN_FILES = " src/cli/oneprovision \
2021-09-16 19:04:11 +03:00
src/cli/oneprovider"
2018-11-29 17:14:17 +03:00
2020-10-13 14:38:19 +03:00
ONEPROVISION_ONE_LIB_FILES = " src/cli/one_helper/oneprovision_helper.rb \
2021-09-16 19:04:11 +03:00
src/cli/one_helper/oneprovider_helper.rb"
2018-11-29 17:14:17 +03:00
2020-10-13 14:38:19 +03:00
ONEPROVISION_CONF_FILES = " src/cli/etc/oneprovision.yaml \
2021-09-16 19:04:11 +03:00
src/cli/etc/oneprovider.yaml"
2018-11-29 17:14:17 +03:00
ONEPROVISION_ANSIBLE_FILES = "share/oneprovision/ansible"
2022-04-06 18:40:55 +03:00
ONEPROVISION_TEMPLATES_FILES = " share/oneprovision/edge-clusters/ \
share/oneprovision/edge-clusters-extra/ "
2020-04-20 18:54:07 +03:00
2020-10-13 14:38:19 +03:00
ONEPROVISION_LIB_FILES = " src/oneprovision/lib/oneprovision.rb \
src/oneprovision/lib/provision_element.rb"
2021-05-19 17:46:47 +03:00
ONEPROVISION_LIB_API_VULTR_FILES = "src/oneprovision/provider_apis/vultr/lib/vultr.rb"
2020-10-13 14:38:19 +03:00
ONEPROVISION_LIB_PROVISION_FILES = " src/oneprovision/lib/provision/ansible.rb \
src/oneprovision/lib/provision/oneprovision.rb \
src/oneprovision/lib/provision/driver.rb \
src/oneprovision/lib/provision/provision.rb \
2020-11-12 17:32:12 +03:00
src/oneprovision/lib/provision/provision_config.rb \
2020-10-13 14:38:19 +03:00
src/oneprovision/lib/provision/provision_pool.rb \
src/oneprovision/lib/provision/resources.rb \
src/oneprovision/lib/provision/utils.rb"
ONEPROVISION_LIB_RESOURCES_FILES = " src/oneprovision/lib/provision/resources/virtual.rb \
src/oneprovision/lib/provision/resources/resource.rb \
src/oneprovision/lib/provision/resources/physical.rb"
ONEPROVISION_LIB_PHYSICAL_R_FILES = " src/oneprovision/lib/provision/resources/physical/cluster.rb \
src/oneprovision/lib/provision/resources/physical/datastore.rb \
src/oneprovision/lib/provision/resources/physical/host.rb \
src/oneprovision/lib/provision/resources/physical/physical_resource.rb \
src/oneprovision/lib/provision/resources/physical/network.rb"
ONEPROVISION_LIB_VIRTUAL_R_FILES = " src/oneprovision/lib/provision/resources/virtual/virtual_resource.rb \
src/oneprovision/lib/provision/resources/virtual/virtual_sync_resource.rb \
src/oneprovision/lib/provision/resources/virtual/image.rb \
src/oneprovision/lib/provision/resources/virtual/marketplaceapp.rb \
src/oneprovision/lib/provision/resources/virtual/template.rb \
src/oneprovision/lib/provision/resources/virtual/flowtemplate.rb \
src/oneprovision/lib/provision/resources/virtual/vntemplate.rb"
ONEPROVISION_LIB_PROVIDER_FILES = " src/oneprovision/lib/provider/provider.rb \
src/oneprovision/lib/provider/provider_pool.rb"
2021-09-16 19:04:11 +03:00
ONEPROVISION_LIB_TF_FILES = "src/oneprovision/lib/terraform/terraform.rb"
2020-10-28 17:11:34 +03:00
ONEPROVISION_LIB_PROVIDERS_FILES = " src/oneprovision/lib/terraform/providers/aws.rb \
2021-04-30 12:49:51 +03:00
src/oneprovision/lib/terraform/providers/google.rb \
src/oneprovision/lib/terraform/providers/digitalocean.rb \
2022-05-01 12:34:42 +03:00
src/oneprovision/lib/terraform/providers/onprem.rb \
2021-09-16 19:04:11 +03:00
src/oneprovision/lib/terraform/providers/example \
2021-09-17 10:51:14 +03:00
src/oneprovision/lib/terraform/providers/equinix.rb \
2021-05-19 13:24:19 +03:00
src/oneprovision/lib/terraform/providers/vultr.rb \
src/oneprovision/lib/terraform/providers/vultr_metal.rb \
src/oneprovision/lib/terraform/providers/vultr_virtual.rb"
2020-10-28 17:11:34 +03:00
2020-11-06 13:09:28 +03:00
ONEPROVISION_LIB_AWS_ERB_FILES = " src/oneprovision/lib/terraform/providers/templates/aws/cluster.erb \
src/oneprovision/lib/terraform/providers/templates/aws/datastore.erb \
src/oneprovision/lib/terraform/providers/templates/aws/host.erb \
src/oneprovision/lib/terraform/providers/templates/aws/network.erb \
src/oneprovision/lib/terraform/providers/templates/aws/provider.erb"
2021-04-30 12:49:51 +03:00
ONEPROVISION_LIB_GOOGLE_ERB_FILES = " src/oneprovision/lib/terraform/providers/templates/google/cluster.erb \
src/oneprovision/lib/terraform/providers/templates/google/datastore.erb \
src/oneprovision/lib/terraform/providers/templates/google/host.erb \
src/oneprovision/lib/terraform/providers/templates/google/network.erb \
src/oneprovision/lib/terraform/providers/templates/google/provider.erb"
ONEPROVISION_LIB_DIGITALOCEAN_ERB_FILES = " src/oneprovision/lib/terraform/providers/templates/digitalocean/cluster.erb \
src/oneprovision/lib/terraform/providers/templates/digitalocean/datastore.erb \
src/oneprovision/lib/terraform/providers/templates/digitalocean/host.erb \
src/oneprovision/lib/terraform/providers/templates/digitalocean/network.erb \
src/oneprovision/lib/terraform/providers/templates/digitalocean/provider.erb"
2021-09-17 10:51:14 +03:00
ONEPROVISION_LIB_EQUINIX_ERB_FILES = " src/oneprovision/lib/terraform/providers/templates/equinix/cluster.erb \
src/oneprovision/lib/terraform/providers/templates/equinix/datastore.erb \
src/oneprovision/lib/terraform/providers/templates/equinix/host.erb \
src/oneprovision/lib/terraform/providers/templates/equinix/network.erb \
src/oneprovision/lib/terraform/providers/templates/equinix/provider.erb"
2020-10-28 17:11:34 +03:00
2021-05-19 13:24:19 +03:00
ONEPROVISION_LIB_VULTR_METAL_ERB_FILES = " src/oneprovision/lib/terraform/providers/templates/vultr_metal/cluster.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_metal/datastore.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_metal/host.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_metal/network.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_metal/provider.erb"
ONEPROVISION_LIB_VULTR_VIRTUAL_ERB_FILES = " src/oneprovision/lib/terraform/providers/templates/vultr_virtual/cluster.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_virtual/datastore.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_virtual/host.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_virtual/network.erb \
src/oneprovision/lib/terraform/providers/templates/vultr_virtual/provider.erb"
2011-02-23 19:27:17 +03:00
#-----------------------------------------------------------------------------
# Sunstone files
#-----------------------------------------------------------------------------
2012-03-07 16:10:14 +04:00
SUNSTONE_FILES = " src/sunstone/sunstone-server.rb \
2013-03-05 19:19:09 +04:00
src/sunstone/config.ru"
2011-02-23 19:27:17 +03:00
2013-03-05 19:19:09 +04:00
SUNSTONE_BIN_FILES = " src/sunstone/bin/sunstone-server \
2020-09-04 23:45:26 +03:00
src/sunstone/bin/novnc-server"
2011-02-23 19:27:17 +03:00
2011-06-17 13:43:53 +04:00
SUNSTONE_ETC_FILES = " src/sunstone/etc/sunstone-server.conf \
2015-11-25 20:06:16 +03:00
src/sunstone/etc/sunstone-views.yaml \
src/sunstone/etc/sunstone-logos.yaml"
2013-04-10 22:37:01 +04:00
2018-01-19 15:24:16 +03:00
SUNSTONE_ETC_VIEW_KVM = " src/sunstone/etc/sunstone-views/kvm/admin.yaml \
src/sunstone/etc/sunstone-views/kvm/user.yaml \
src/sunstone/etc/sunstone-views/kvm/cloud.yaml \
src/sunstone/etc/sunstone-views/kvm/groupadmin.yaml"
SUNSTONE_ETC_VIEW_VCENTER = " src/sunstone/etc/sunstone-views/vcenter/admin.yaml \
src/sunstone/etc/sunstone-views/vcenter/user.yaml \
src/sunstone/etc/sunstone-views/vcenter/cloud.yaml \
src/sunstone/etc/sunstone-views/vcenter/groupadmin.yaml"
SUNSTONE_ETC_VIEW_MIXED = " src/sunstone/etc/sunstone-views/mixed/admin.yaml \
src/sunstone/etc/sunstone-views/mixed/user.yaml \
src/sunstone/etc/sunstone-views/mixed/cloud.yaml \
src/sunstone/etc/sunstone-views/mixed/groupadmin.yaml"
2011-05-13 20:01:40 +04:00
2011-02-23 19:27:17 +03:00
SUNSTONE_MODELS_FILES = " src/sunstone/models/OpenNebulaJSON.rb \
2021-02-08 12:40:30 +03:00
src/sunstone/models/SunstoneServer.rb \
src/sunstone/models/SunstoneViews.rb \
src/sunstone/models/sunstone_vm_helper.rb \
2021-04-22 18:32:40 +03:00
src/sunstone/models/sunstone_remotes.rb \
2021-02-08 12:40:30 +03:00
src/sunstone/models/sunstone_vnc.rb \
src/sunstone/models/sunstone_guac.rb \
src/sunstone/models/sunstone_vmrc.rb \
src/sunstone/models/OpenNebula2FA/SunstoneWebAuthn.rb \
src/sunstone/models/OpenNebula2FA/sunstone_qr_code.rb \
src/sunstone/models/OpenNebula2FA/sunstone_optp.rb \
src/sunstone/models/OpenNebula2FA/sunstone_2f_auth.rb"
2011-02-23 19:27:17 +03:00
2011-06-14 01:43:40 +04:00
SUNSTONE_MODELS_JSON_FILES = " src/sunstone/models/OpenNebulaJSON/HostJSON.rb \
2011-02-23 19:27:17 +03:00
src/sunstone/models/OpenNebulaJSON/ImageJSON.rb \
2011-06-14 02:01:27 +04:00
src/sunstone/models/OpenNebulaJSON/GroupJSON.rb \
2011-02-23 19:27:17 +03:00
src/sunstone/models/OpenNebulaJSON/JSONUtils.rb \
src/sunstone/models/OpenNebulaJSON/PoolJSON.rb \
src/sunstone/models/OpenNebulaJSON/UserJSON.rb \
2017-02-20 17:42:45 +03:00
src/sunstone/models/OpenNebulaJSON/VMGroupJSON.rb \
2011-02-23 19:27:17 +03:00
src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb \
2011-05-12 14:01:12 +04:00
src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb \
2011-07-22 18:28:07 +04:00
src/sunstone/models/OpenNebulaJSON/AclJSON.rb \
2012-03-09 00:33:07 +04:00
src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb \
2012-03-10 20:47:15 +04:00
src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb \
2013-12-13 22:19:41 +04:00
src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb \
2018-12-11 13:08:02 +03:00
src/sunstone/models/OpenNebulaJSON/VirtualNetworkTemplateJSON.rb \
2014-09-11 19:38:29 +04:00
src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb \
2015-01-09 18:03:18 +03:00
src/sunstone/models/OpenNebulaJSON/SecurityGroupJSON.rb \
2015-11-30 18:55:22 +03:00
src/sunstone/models/OpenNebulaJSON/VdcJSON.rb \
2016-02-03 18:53:01 +03:00
src/sunstone/models/OpenNebulaJSON/VirtualRouterJSON.rb \
2023-09-06 20:02:27 +03:00
src/sunstone/models/OpenNebulaJSON/BackupJobJSON.rb \
2016-02-03 18:53:01 +03:00
src/sunstone/models/OpenNebulaJSON/MarketPlaceJSON.rb \
src/sunstone/models/OpenNebulaJSON/MarketPlaceAppJSON.rb"
2011-02-23 19:27:17 +03:00
2013-04-16 22:05:32 +04:00
SUNSTONE_VIEWS_FILES = " src/sunstone/views/index.erb \
2012-08-28 20:26:05 +04:00
src/sunstone/views/login.erb \
2013-07-08 21:03:44 +04:00
src/sunstone/views/vnc.erb \
2020-09-21 19:44:59 +03:00
src/sunstone/views/vmrc.erb \
2014-12-05 20:41:19 +03:00
src/sunstone/views/spice.erb \
2021-04-22 18:32:40 +03:00
src/sunstone/views/guac.erb \
2012-08-28 20:26:05 +04:00
src/sunstone/views/_login_standard.erb \
src/sunstone/views/_login_x509.erb"
2011-02-23 19:27:17 +03:00
2015-06-23 19:04:44 +03:00
SUNSTONE_PUBLIC_JS_FILES=" src/sunstone/public/dist/login.js \
                           src/sunstone/public/dist/login.js.map \
                           src/sunstone/public/dist/main.js \
                           src/sunstone/public/dist/main.js.map \
                           src/sunstone/public/dist/main-dist.js"

SUNSTONE_PUBLIC_JS_CONSOLE_FILES=" src/sunstone/public/dist/console/vnc.js \
                                   src/sunstone/public/dist/console/vnc.js.map \
                                   src/sunstone/public/dist/console/spice.js \
                                   src/sunstone/public/dist/console/spice.js.map \
                                   src/sunstone/public/dist/console/guacamole.js \
                                   src/sunstone/public/dist/console/guacamole.js.map \
                                   src/sunstone/public/dist/console/vmrc.js \
                                   src/sunstone/public/dist/console/vmrc.js.map"

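# Full (non-minified) Sunstone web sources; installed in place of the prebuilt
# dist/ bundles when the development (non-minified) install is selected below.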
SUNSTONE_PUBLIC_DEV_DIR="src/sunstone/public"

SUNSTONE_ROUTES_FILES=" src/sunstone/routes/oneflow.rb \
                        src/sunstone/routes/vcenter.rb \
                        src/sunstone/routes/support.rb \
                        src/sunstone/routes/nsx.rb"

SUNSTONE_PUBLIC_CSS_FILES=" src/sunstone/public/css/app.min.css \
                            src/sunstone/public/css/opensans/opensans.woff \
                            src/sunstone/public/css/vmrc-custom.css \
                            src/sunstone/public/css/novnc-custom.css \
                            src/sunstone/public/css/guac-custom.css \
                            src/sunstone/public/css/spice-custom.css"

SUNSTONE_PUBLIC_FONT_AWSOME=" src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-brands-400.eot \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-brands-400.svg \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-brands-400.ttf \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-brands-400.woff \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-brands-400.woff2 \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-regular-400.eot \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-regular-400.svg \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-regular-400.ttf \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-regular-400.woff \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-regular-400.woff2 \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-solid-900.eot \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-solid-900.svg \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-solid-900.ttf \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-solid-900.woff \
src/sunstone/public/bower_components/fontawesome/web-fonts-with-css/webfonts/fa-solid-900.woff2"
SUNSTONE_PUBLIC_IMAGES_FILES=" src/sunstone/public/images/ajax-loader.gif \
                               src/sunstone/public/images/favicon.svg \
                               src/sunstone/public/images/advanced_layout.png \
                               src/sunstone/public/images/cloud_layout.png \
                               src/sunstone/public/images/vcenter_layout.png \
                               src/sunstone/public/images/opennebula-5.0.png \
                               src/sunstone/public/images/opennebula-sunstone-v4.0.png \
                               src/sunstone/public/images/opennebula-sunstone-v4.14-small.png \
                               src/sunstone/public/images/panel.png \
                               src/sunstone/public/images/panel_short.png \
                               src/sunstone/public/images/pbar.gif \
                               "

SUNSTONE_PUBLIC_LOGOS_FILES=" src/sunstone/public/images/logos/alt.png \
                              src/sunstone/public/images/logos/arch.png \
                              src/sunstone/public/images/logos/centos.png \
                              src/sunstone/public/images/logos/debian.png \
                              src/sunstone/public/images/logos/fedora.png \
                              src/sunstone/public/images/logos/linux.png \
                              src/sunstone/public/images/logos/redhat.png \
                              src/sunstone/public/images/logos/ubuntu.png \
                              src/sunstone/public/images/logos/windowsxp.png \
                              src/sunstone/public/images/logos/windows8.png \
                              "

SUNSTONE_PUBLIC_LOCALE_CA=" \
    src/sunstone/public/locale/languages/ca.js \
    src/sunstone/public/locale/languages/ca_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_CS_CZ=" \
    src/sunstone/public/locale/languages/cs_CZ.js \
    src/sunstone/public/locale/languages/cs_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_DE=" \
    src/sunstone/public/locale/languages/de.js \
    src/sunstone/public/locale/languages/de_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_DA=" \
    src/sunstone/public/locale/languages/da.js \
    src/sunstone/public/locale/languages/da_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_EN_US=" \
    src/sunstone/public/locale/languages/en_US.js \
    src/sunstone/public/locale/languages/en_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_ES_ES=" \
    src/sunstone/public/locale/languages/es_ES.js \
    src/sunstone/public/locale/languages/es_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_FA_IR=" \
    src/sunstone/public/locale/languages/fa_IR.js \
    src/sunstone/public/locale/languages/fa_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_FR_FR=" \
    src/sunstone/public/locale/languages/fr_FR.js \
    src/sunstone/public/locale/languages/fr_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_IT_IT=" \
    src/sunstone/public/locale/languages/it_IT.js \
    src/sunstone/public/locale/languages/it_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_JA=" \
    src/sunstone/public/locale/languages/ja.js \
    src/sunstone/public/locale/languages/ja_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_LT_LT=" \
    src/sunstone/public/locale/languages/lt_LT.js \
    src/sunstone/public/locale/languages/lt_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_NL_NL=" \
    src/sunstone/public/locale/languages/nl_NL.js \
    src/sunstone/public/locale/languages/nl_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_PL=" \
    src/sunstone/public/locale/languages/pl.js \
    src/sunstone/public/locale/languages/pl_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_PT_PT=" \
    src/sunstone/public/locale/languages/pt_PT.js \
    src/sunstone/public/locale/languages/pt_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_PT_BR=" \
    src/sunstone/public/locale/languages/pt_BR.js"

SUNSTONE_PUBLIC_LOCALE_RU_RU=" \
    src/sunstone/public/locale/languages/ru_RU.js \
    src/sunstone/public/locale/languages/ru_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_SK_SK=" \
    src/sunstone/public/locale/languages/sk_SK.js \
    src/sunstone/public/locale/languages/sk_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_ZH_CN=" \
    src/sunstone/public/locale/languages/zh_CN.js \
    src/sunstone/public/locale/languages/zh_datatable.txt"

SUNSTONE_PUBLIC_LOCALE_TR_TR=" \
    src/sunstone/public/locale/languages/tr_TR.js \
    src/sunstone/public/locale/languages/tr_datatable.txt"

#-----------------------------------------------------------------------------
# FireEdge files
#-----------------------------------------------------------------------------
FIREEDGE_BIN_FILES="src/fireedge/bin/fireedge-server"

FIREEDGE_MINIFIED_FILES=" src/fireedge/dist \
                          src/fireedge/node_modules"

FIREEDGE_DEV_FILES=" src/fireedge/src \
                     src/fireedge/package.json"

FIREEDGE_ETC_FILES="src/fireedge/etc/fireedge-server.conf"

#----------------------------------------------------------------------------
# FireEdge Provision files
#----------------------------------------------------------------------------
FIREEDGE_PROVISION_ETC="src/fireedge/etc/provision/provision-server.conf"

FIREEDGE_PROVISION_ETC_PROVIDERS=" src/fireedge/etc/provision/providers.d/aws.yaml \
                                   src/fireedge/etc/provision/providers.d/onprem.yaml \
                                   src/fireedge/etc/provision/providers.d/equinix.yaml"

FIREEDGE_PROVISION_ETC_PROVIDERS_EXTRA=" src/fireedge/etc/provision/providers.d-extra/digitalocean.yaml \
                                         src/fireedge/etc/provision/providers.d-extra/google.yaml \
                                         src/fireedge/etc/provision/providers.d-extra/vultr_metal.yaml \
                                         src/fireedge/etc/provision/providers.d-extra/vultr_virtual.yaml"

#----------------------------------------------------------------------------
# FireEdge Sunstone files
#----------------------------------------------------------------------------
FIREEDGE_SUNSTONE_ETC=" src/fireedge/etc/sunstone/sunstone-server.conf \
                        src/fireedge/etc/sunstone/sunstone-views.yaml"

FIREEDGE_SUNSTONE_ETC_VIEW_ADMIN=" src/fireedge/etc/sunstone/admin/vm-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/vm-template-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/vm-group-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/marketplace-app-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/vnet-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/image-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/file-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/sec-group-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/backup-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/datastore-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/vdc-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/user-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/backupjobs-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/host-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/group-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/acl-tab.yaml \
                                   src/fireedge/etc/sunstone/admin/cluster-tab.yaml"

FIREEDGE_SUNSTONE_ETC_VIEW_USER=" src/fireedge/etc/sunstone/user/vm-tab.yaml \
                                  src/fireedge/etc/sunstone/user/vm-template-tab.yaml \
                                  src/fireedge/etc/sunstone/user/marketplace-app-tab.yaml \
                                  src/fireedge/etc/sunstone/user/image-tab.yaml \
                                  src/fireedge/etc/sunstone/user/file-tab.yaml \
                                  src/fireedge/etc/sunstone/user/backup-tab.yaml \
                                  src/fireedge/etc/sunstone/user/sec-group-tab.yaml \
                                  src/fireedge/etc/sunstone/user/vnet-tab.yaml"

FIREEDGE_SUNSTONE_ETC_VIEW_GROUPADMIN=" src/fireedge/etc/sunstone/groupadmin/vm-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/vm-template-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/marketplace-app-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/image-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/file-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/backup-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/sec-group-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/vnet-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/user-tab.yaml \
                                        src/fireedge/etc/sunstone/groupadmin/group-tab.yaml"

FIREEDGE_SUNSTONE_ETC_VIEW_CLOUD=" src/fireedge/etc/sunstone/cloud/vm-tab.yaml \
                                   src/fireedge/etc/sunstone/cloud/vm-template-tab.yaml"

#-----------------------------------------------------------------------------
# OneGate files
#-----------------------------------------------------------------------------
ONEGATE_FILES=" src/onegate/onegate-server.rb \
                src/onegate/config.ru \
                share/onegate/onegate"

ONEGATE_BIN_FILES="src/onegate/bin/onegate-server"
ONEGATE_ETC_FILES="src/onegate/etc/onegate-server.conf"

#-----------------------------------------------------------------------------
# OneGateProxy files
#-----------------------------------------------------------------------------
ONEGATE_PROXY_FILES="src/onegate-proxy/onegate-proxy.rb"
ONEGATE_PROXY_BIN_FILES="src/onegate-proxy/bin/onegate-proxy"
ONEGATE_PROXY_REMOTES_ETC_FILES="src/onegate-proxy/etc/onegate-proxy.conf"

#-----------------------------------------------------------------------------
# OneFlow files
#-----------------------------------------------------------------------------
ONEFLOW_FILES=" src/flow/oneflow-server.rb \
                src/flow/config.ru"
ONEFLOW_BIN_FILES="src/flow/bin/oneflow-server"
ONEFLOW_ETC_FILES="src/flow/etc/oneflow-server.conf"

ONEFLOW_LIB_FILES=" src/flow/lib/grammar.treetop \
                    src/flow/lib/LifeCycleManager.rb \
                    src/flow/lib/ServiceWatchDog.rb \
                    src/flow/lib/ServiceAutoScaler.rb \
                    src/flow/lib/log.rb \
                    src/flow/lib/models.rb \
                    src/flow/lib/strategy.rb \
                    src/flow/lib/EventManager.rb"

ONEFLOW_LIB_STRATEGY_FILES="src/flow/lib/strategy/straight.rb"
ONEFLOW_LIB_MODELS_FILES=" src/flow/lib/models/role.rb \
                           src/flow/lib/models/service.rb"

#-----------------------------------------------------------------------------
# Onecfg files
#-----------------------------------------------------------------------------
ONECFG_BIN_FILES="src/onecfg/bin/onecfg"
ONECFG_LIB_FILES=" src/onecfg/lib/onecfg.rb \
                   src/onecfg/lib/common.rb \
                   src/onecfg/lib/config.rb \
                   src/onecfg/lib/exception.rb \
                   src/onecfg/lib/settings.rb \
                   src/onecfg/lib/transaction.rb \
                   src/onecfg/lib/patch.rb \
                   src/onecfg/lib/version.rb"
ONECFG_LIB_COMMON_FILES=" src/onecfg/lib/common/backup.rb \
                          src/onecfg/lib/common/parser.rb"
ONECFG_LIB_COMMON_HELPERS_FILES="src/onecfg/lib/common/helpers/onecfg_helper.rb"
ONECFG_LIB_COMMON_LOGGER_FILES="src/onecfg/lib/common/logger/cli_logger.rb"
ONECFG_LIB_CONFIG_FILES=" src/onecfg/lib/config/exception.rb \
                          src/onecfg/lib/config/files.rb \
                          src/onecfg/lib/config/fsops.rb \
                          src/onecfg/lib/config/type.rb \
                          src/onecfg/lib/config/utils.rb"
ONECFG_LIB_CONFIG_TYPE_FILES=" src/onecfg/lib/config/type/augeas.rb \
                               src/onecfg/lib/config/type/base.rb \
                               src/onecfg/lib/config/type/simple.rb \
                               src/onecfg/lib/config/type/yaml.rb"
ONECFG_LIB_CONFIG_TYPE_AUGEAS_FILES=" src/onecfg/lib/config/type/augeas/one.rb \
                                      src/onecfg/lib/config/type/augeas/shell.rb"
ONECFG_LIB_CONFIG_TYPE_YAML_FILES="src/onecfg/lib/config/type/yaml/strict.rb"
ONECFG_LIB_PATCH_FILES="src/onecfg/lib/patch/apply.rb"
ONECFG_SHARE_ETC_FILES="src/onecfg/share/etc/files.yaml"

#-----------------------------------------------------------------------------
# OneHem files
#-----------------------------------------------------------------------------
ONEHEM_FILES="src/hem/onehem-server.rb"
ONEHEM_BIN_FILES="src/hem/bin/onehem-server"
ONEHEM_ETC_FILES="src/hem/etc/onehem-server.conf"

#-----------------------------------------------------------------------------
# Dockerfiles templates
#-----------------------------------------------------------------------------
DOCKERFILES_TEMPLATES=" src/datastore_mad/remotes/dockerhub/dockerfiles/alpine \
                        src/datastore_mad/remotes/dockerhub/dockerfiles/centos7 \
                        src/datastore_mad/remotes/dockerhub/dockerfiles/centos8 \
                        src/datastore_mad/remotes/dockerhub/dockerfiles/debian"
DOCKERFILE_TEMPLATE="src/datastore_mad/remotes/dockerhub/dockerfile"

#-----------------------------------------------------------------------------
# Docker Machine files
#-----------------------------------------------------------------------------
DOCKER_MACHINE_BIN_FILES="src/docker_machine/src/docker_machine/bin/docker-machine-driver-opennebula"

#-----------------------------------------------------------------------------
# SSH files
#-----------------------------------------------------------------------------
SSH_SH_LIB_FILES="share/ssh/bin/ssh-socks-cleaner"
SSH_SH_OVERRIDE_LIB_FILES="share/ssh/bin/ssh"
SSH_SHARE_FILES=" share/ssh/etc/config \
                  share/ssh/etc/config-pre7.6"

#-----------------------------------------------------------------------------
# MAN files
#-----------------------------------------------------------------------------
MAN_FILES=" share/man/oneacct.1.gz \
            share/man/oneshowback.1.gz \
            share/man/oneacl.1.gz \
            share/man/onehook.1.gz \
            share/man/onebackupjob.1.gz \
            share/man/onelog.1.gz \
            share/man/oneirb.1.gz \
            share/man/onehost.1.gz \
            share/man/oneimage.1.gz \
            share/man/oneuser.1.gz \
            share/man/onevm.1.gz \
            share/man/onevnet.1.gz \
            share/man/onetemplate.1.gz \
            share/man/onegroup.1.gz \
            share/man/onecfg.1.gz \
            share/man/onedb.1.gz \
            share/man/onedatastore.1.gz \
            share/man/onecluster.1.gz \
            share/man/onezone.1.gz \
            share/man/onevcenter.1.gz \
            share/man/oneflow.1.gz \
            share/man/oneflow-template.1.gz \
            share/man/oneprovision.1.gz \
            share/man/oneprovider.1.gz \
            share/man/onesecgroup.1.gz \
            share/man/onevdc.1.gz \
            share/man/onevrouter.1.gz \
            share/man/onemarket.1.gz \
            share/man/onemarketapp.1.gz \
            share/man/onevmgroup.1.gz \
            share/man/onevntemplate.1.gz"

#-----------------------------------------------------------------------------
# Docs Files
#-----------------------------------------------------------------------------
DOCS_FILES="LICENSE LICENSE.onsla LICENSE.onsla-nc NOTICE README.md"

#-----------------------------------------------------------------------------
# Ruby VENDOR files
#-----------------------------------------------------------------------------
VENDOR_DIRS="share/vendor/ruby/gems/packethost"

#-------------------------------------------------------------------------------
# Libvirt RelaxNG schemas
#-------------------------------------------------------------------------------
LIBVIRT_RNG_SHARE_MODULE_FILES=" share/schemas/libvirt/basictypes.rng \
share/schemas/libvirt/cputypes.rng \
share/schemas/libvirt/domaincaps.rng \
share/schemas/libvirt/domaincheckpoint.rng \
share/schemas/libvirt/domaincommon.rng \
share/schemas/libvirt/domain.rng \
share/schemas/libvirt/domainsnapshot.rng \
share/schemas/libvirt/networkcommon.rng \
share/schemas/libvirt/nwfilter_params.rng \
share/schemas/libvirt/storagecommon.rng"
#-------------------------------------------------------------------------------
# XSD
#-------------------------------------------------------------------------------
XSD_FILES=" share/doc/xsd/acct.xsd \
share/doc/xsd/acl_pool.xsd
share/doc/xsd/api_info.xsd
share/doc/xsd/backupjob.xsd
share/doc/xsd/backupjob_pool.xsd
share/doc/xsd/cluster.xsd
share/doc/xsd/cluster_pool.xsd
share/doc/xsd/datastore.xsd
share/doc/xsd/datastore_pool.xsd
share/doc/xsd/document.xsd
share/doc/xsd/document_pool.xsd
share/doc/xsd/group.xsd
share/doc/xsd/group_pool.xsd
share/doc/xsd/hook.xsd
share/doc/xsd/hook_message_api.xsd
share/doc/xsd/hook_message_retry.xsd
share/doc/xsd/hook_message_state.xsd
share/doc/xsd/hook_pool.xsd
share/doc/xsd/host.xsd
share/doc/xsd/host_pool.xsd
share/doc/xsd/image.xsd
share/doc/xsd/image_pool.xsd
share/doc/xsd/index.xsd
share/doc/xsd/marketplace.xsd
share/doc/xsd/marketplace_pool.xsd
share/doc/xsd/marketplaceapp.xsd
share/doc/xsd/marketplaceapp_pool.xsd
share/doc/xsd/monitoring_data.xsd
share/doc/xsd/opennebula_configuration.xsd
share/doc/xsd/raftstatus.xsd
share/doc/xsd/security_group.xsd
share/doc/xsd/security_group_pool.xsd
share/doc/xsd/shared.xsd
share/doc/xsd/showback.xsd
share/doc/xsd/user.xsd
share/doc/xsd/user_pool.xsd
share/doc/xsd/vdc.xsd
share/doc/xsd/vdc_pool.xsd
share/doc/xsd/vm.xsd
share/doc/xsd/vm_group.xsd
share/doc/xsd/vm_group_pool.xsd
share/doc/xsd/vm_pool.xsd
share/doc/xsd/vmtemplate.xsd
share/doc/xsd/vmtemplate_pool.xsd
share/doc/xsd/vnet.xsd
share/doc/xsd/vnet_pool.xsd
share/doc/xsd/vntemplate.xsd
share/doc/xsd/vntemplate_pool.xsd
share/doc/xsd/vrouter.xsd
share/doc/xsd/vrouter_pool.xsd
share/doc/xsd/zone.xsd
share/doc/xsd/zone_pool.xsd"
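# Context packages: every file under share/context/ is installed as-is,
# except shell scripts and the SConstruct build file.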
CONTEXT_SHARE=$(find share/context/ -type f \( ! -iname "*.sh" ! -iname "SConstruct" \))

#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# INSTALL.SH SCRIPT
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------

# --- Create OpenNebula directories ---

if [ "$UNINSTALL" = "no" ] ; then
    for d in $MAKE_DIRS; do
        mkdir -p $DESTDIR$d
    done
fi

# --- Install/Uninstall files ---

do_file() {
    if [ "$UNINSTALL" = "yes" ] ; then
        rm $DESTDIR$2/`basename $1`
    else
        if [ "$LINK" = "yes" ] ; then
            ln -s $SRC_DIR/$1 $DESTDIR$2
        else
            cp -RL $SRC_DIR/$1 $DESTDIR$2
        fi
    fi
}
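
# do_file <src_path> <dst_dir>: copies a single file from the source tree into
# $DESTDIR<dst_dir>, symlinks it instead when -l was given ($LINK=yes), or
# removes it when uninstalling. Illustrative call (the destination directory
# shown here is hypothetical, the real ones come from the install sets):
#   do_file src/flow/bin/oneflow-server /usr/bin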
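# Select the set of files to (un)install according to the command-line
# options: client tools only, OneGate, OneGate Proxy, Sunstone, FireEdge,
# OneFlow, the docker-machine driver, or the full installation, with either
# the minified or the development (non-minified) web assets.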
if [ "$CLIENT" = "yes" ] ; then
    INSTALL_SET=${INSTALL_CLIENT_FILES[@]}
elif [ "$ONEGATE" = "yes" ] ; then
    INSTALL_SET="${INSTALL_ONEGATE_FILES[@]}"
elif [ "$ONEGATE_PROXY" = "yes" ] ; then
    INSTALL_SET="${INSTALL_ONEGATE_PROXY_FILES[@]}"
elif [ "$SUNSTONE" = "yes" ] ; then
    if [ "$SUNSTONE_DEV" = "no" ] ; then
        INSTALL_SET="${INSTALL_SUNSTONE_RUBY_FILES[@]} \
                     ${INSTALL_SUNSTONE_PUBLIC_MINIFIED_FILES[@]} \
                     ${INSTALL_SUNSTONE_FILES[@]}"
    else
        INSTALL_SET="${INSTALL_SUNSTONE_RUBY_FILES[@]} \
                     ${INSTALL_SUNSTONE_PUBLIC_DEV_DIR[@]} \
                     ${INSTALL_SUNSTONE_FILES[@]}"
    fi
elif [ "$FIREEDGE" = "yes" ] ; then
    if [ "$FIREEDGE_DEV" = "no" ] ; then
        INSTALL_SET="${INSTALL_FIREEDGE_FILES[@]}"
    else
        INSTALL_SET="${INSTALL_FIREEDGE_DEV_DIRS[@]} \
                     ${INSTALL_FIREEDGE_FILES[@]}"
    fi
elif [ "$ONEFLOW" = "yes" ] ; then
    INSTALL_SET="${INSTALL_ONEFLOW_FILES[@]}"
elif [ "$DOCKER_MACHINE" = "yes" ] ; then
    INSTALL_SET="${INSTALL_DOCKER_MACHINE_FILES[@]}"
elif [ "$SUNSTONE_DEV" = "no" ] ; then
    INSTALL_SET="${INSTALL_FILES[@]} \
                 ${INSTALL_SUNSTONE_FILES[@]} ${INSTALL_SUNSTONE_PUBLIC_MINIFIED_FILES[@]} \
                 ${INSTALL_ONEGATE_FILES[@]} \
                 ${INSTALL_ONEFLOW_FILES[@]} \
                 ${INSTALL_ONEHEM_FILES[@]} \
                 ${INSTALL_ONEPROVISION_FILES[@]} \
                 ${INSTALL_ONECFG_FILES[@]}"
elif [ "$FIREEDGE_DEV" = "no" ] ; then
    INSTALL_SET="${INSTALL_FILES[@]} \
                 ${INSTALL_FIREEDGE_MINIFIED_DIRS[@]} \
                 ${INSTALL_ONEGATE_FILES[@]} \
                 ${INSTALL_ONEFLOW_FILES[@]} \
                 ${INSTALL_ONEHEM_FILES[@]} \
                 ${INSTALL_ONEPROVISION_FILES[@]} \
                 ${INSTALL_ONECFG_FILES[@]}"
else
    INSTALL_SET="${INSTALL_FILES[@]} \
                 ${INSTALL_SUNSTONE_FILES[@]} ${INSTALL_SUNSTONE_PUBLIC_DEV_DIR[@]} \
                 ${INSTALL_FIREEDGE_FILES[@]} ${INSTALL_FIREEDGE_DEV_DIRS[@]} \
                 ${INSTALL_ONEGATE_FILES[@]} \
                 ${INSTALL_ONEGATE_PROXY_FILES[@]} \
                 ${INSTALL_ONEFLOW_FILES[@]} \
                 ${INSTALL_ONEHEM_FILES[@]} \
                 ${INSTALL_ONEPROVISION_FILES[@]} \
                 ${INSTALL_ONECFG_FILES[@]}"
fi
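
# Each INSTALL_SET entry has the form <VARIABLE>:<destination dir>. The eval
# below dereferences the variable name to obtain the actual file list, then
# do_file installs every file in that list into the destination directory.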
for i in ${INSTALL_SET[@]} ; do
    SRC=$`echo $i | cut -d: -f1`
    DST=`echo $i | cut -d: -f2`

    eval SRC_FILES=$SRC

    for f in $SRC_FILES; do
        do_file $f $DST
    done
done
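
# Illustrative entry only (the destination shown is hypothetical; the real
# mappings are defined with the INSTALL_* variables above): an entry such as
# "ONEFLOW_BIN_FILES:/usr/bin" would expand SRC_FILES to
# src/flow/bin/oneflow-server and copy or link that file into $DESTDIR/usr/bin.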

if [ "$INSTALL_ETC" = "yes" ] ; then
    if [ "$SUNSTONE" = "yes" ] ; then
        INSTALL_ETC_SET="${INSTALL_SUNSTONE_ETC_FILES[@]}"
    elif [ "$FIREEDGE" = "yes" ] ; then
        INSTALL_ETC_SET="${INSTALL_FIREEDGE_ETC_FILES[@]}"
    elif [ "$ONEGATE" = "yes" ] ; then
        INSTALL_ETC_SET="${INSTALL_ONEGATE_ETC_FILES[@]}"
    elif [ "$ONEGATE_PROXY" = "yes" ] ; then
        INSTALL_ETC_SET="${INSTALL_ONEGATE_PROXY_ETC_FILES[@]}"
    elif [ "$ONEFLOW" = "yes" ] ; then
        INSTALL_ETC_SET="${INSTALL_ONEFLOW_ETC_FILES[@]}"
    else
        INSTALL_ETC_SET="${INSTALL_ETC_FILES[@]} \
                         ${INSTALL_SUNSTONE_ETC_FILES[@]} \
                         ${INSTALL_FIREEDGE_ETC_FILES[@]} \
                         ${INSTALL_ONEGATE_ETC_FILES[@]} \
                         ${INSTALL_ONEGATE_PROXY_ETC_FILES[@]} \
                         ${INSTALL_ONEHEM_ETC_FILES[@]} \
                         ${INSTALL_ONEFLOW_ETC_FILES[@]}"
    fi
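
    # Configuration files are always copied, never symlinked: LINK is
    # temporarily forced to "no" for this loop so that the -l (development
    # symlinks) mode does not turn files under the etc locations into links.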
    for i in ${INSTALL_ETC_SET[@]} ; do
        SRC=$`echo $i | cut -d: -f1`
        DST=`echo $i | cut -d: -f2`

        eval SRC_FILES=$SRC

        OLD_LINK=$LINK
        LINK="no"

        for f in $SRC_FILES; do
            do_file $f $DST
        done

        LINK=$OLD_LINK
    done
fi

# --- Set ownership or remove OpenNebula directories ---

if [ "$UNINSTALL" = "no" ] ; then
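    # Replace the shipped public/dist/main.js with a symlink to
    # $VAR_LOCATION/sunstone/main.js (touched here so the link target exists),
    # presumably so the served bundle can be regenerated in place.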
    if [ "$SUNSTONE" = "yes" ] || [ "$SUNSTONE_DEV" = "yes" ] ; then
        touch $DESTDIR$VAR_LOCATION/sunstone/main.js
        rm -f $DESTDIR$SUNSTONE_LOCATION/public/dist/main.js
        ln -s $VAR_LOCATION/sunstone/main.js $DESTDIR$SUNSTONE_LOCATION/public/dist/main.js
    fi

    for d in $CHOWN_DIRS; do
        chown -R $ONEADMIN_USER:$ONEADMIN_GROUP $DESTDIR$d
    done
else
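    # Remove directories deepest-first: the awk one-liner reverses the
    # $DELETE_DIRS list so child directories are rmdir'ed before their
    # parents (rmdir only removes empty directories).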
    for d in `echo $DELETE_DIRS | awk '{for (i=NF;i>=1;i--) printf $i" "}'` ; do
        rmdir $d
    done
fi