Tino Vazquez 2022-08-26 10:47:39 +02:00
commit 193b789c13
32 changed files with 3093 additions and 2225 deletions


@ -250,7 +250,6 @@ SHARE_DIRS="$SHARE_LOCATION/examples \
$SHARE_LOCATION/examples/network_hooks \
$SHARE_LOCATION/websockify \
$SHARE_LOCATION/websockify/websockify \
$SHARE_LOCATION/esx-fw-vnc \
$SHARE_LOCATION/oneprovision \
$SHARE_LOCATION/dockerhub \
$SHARE_LOCATION/dockerhub/dockerfiles \
@ -754,7 +753,6 @@ INSTALL_FILES=(
LXD_NETWORK_HOOKS:$SHARE_LOCATION/examples/network_hooks
WEBSOCKIFY_SHARE_RUN_FILES:$SHARE_LOCATION/websockify
WEBSOCKIFY_SHARE_MODULE_FILES:$SHARE_LOCATION/websockify/websockify
ESX_FW_VNC_SHARE_FILES:$SHARE_LOCATION/esx-fw-vnc
INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION
ONETOKEN_SHARE_FILE:$SHARE_LOCATION
FOLLOWER_CLEANUP_SHARE_FILE:$SHARE_LOCATION


@ -0,0 +1,263 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.11.3)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.4.0)
android_key_attestation (0.3.0)
augeas (0.6.4)
awrence (1.2.1)
aws-eventstream (1.2.0)
aws-partitions (1.610.0)
aws-sdk-cloudwatch (1.65.0)
aws-sdk-core (~> 3, >= 3.127.0)
aws-sigv4 (~> 1.1)
aws-sdk-core (3.131.3)
aws-eventstream (~> 1, >= 1.0.2)
aws-partitions (~> 1, >= 1.525.0)
aws-sigv4 (~> 1.1)
jmespath (~> 1, >= 1.6.1)
aws-sdk-ec2 (1.324.0)
aws-sdk-core (~> 3, >= 3.127.0)
aws-sigv4 (~> 1.1)
aws-sdk-kms (1.58.0)
aws-sdk-core (~> 3, >= 3.127.0)
aws-sigv4 (~> 1.1)
aws-sdk-s3 (1.114.0)
aws-sdk-core (~> 3, >= 3.127.0)
aws-sdk-kms (~> 1)
aws-sigv4 (~> 1.4)
aws-sigv4 (1.5.1)
aws-eventstream (~> 1, >= 1.0.2)
azure_mgmt_compute (0.22.0)
ms_rest_azure (~> 0.12.0)
azure_mgmt_monitor (0.19.0)
ms_rest_azure (~> 0.12.0)
azure_mgmt_network (0.26.1)
ms_rest_azure (~> 0.12.0)
azure_mgmt_resources (0.18.2)
ms_rest_azure (~> 0.12.0)
azure_mgmt_storage (0.23.0)
ms_rest_azure (~> 0.12.0)
bindata (2.4.10)
builder (3.2.4)
cbor (0.5.9.6)
chunky_png (1.4.0)
concurrent-ruby (1.1.10)
configparser (0.1.7)
cose (1.2.1)
cbor (~> 0.5.9)
openssl-signature_algorithm (~> 1.0)
curb (1.0.1)
daemons (1.4.1)
dalli (2.7.11)
domain_name (0.5.20190701)
unf (>= 0.0.5, < 1.0.0)
eventmachine (1.2.7)
faraday (1.10.0)
faraday-em_http (~> 1.0)
faraday-em_synchrony (~> 1.0)
faraday-excon (~> 1.1)
faraday-httpclient (~> 1.0)
faraday-multipart (~> 1.0)
faraday-net_http (~> 1.0)
faraday-net_http_persistent (~> 1.0)
faraday-patron (~> 1.0)
faraday-rack (~> 1.0)
faraday-retry (~> 1.0)
ruby2_keywords (>= 0.0.4)
faraday-cookie_jar (0.0.7)
faraday (>= 0.8.0)
http-cookie (~> 1.0.0)
faraday-em_http (1.0.0)
faraday-em_synchrony (1.0.0)
faraday-excon (1.1.0)
faraday-httpclient (1.0.1)
faraday-multipart (1.0.4)
multipart-post (~> 2)
faraday-net_http (1.0.1)
faraday-net_http_persistent (1.2.0)
faraday-patron (1.0.0)
faraday-rack (1.0.0)
faraday-retry (1.0.3)
faraday_middleware (1.2.0)
faraday (~> 1.0)
ffi (1.15.5)
ffi-rzmq (2.0.7)
ffi-rzmq-core (>= 1.0.7)
ffi-rzmq-core (1.0.7)
ffi
git (1.11.0)
rchardet (~> 1.8)
gnuplot (2.6.2)
hashie (5.0.0)
highline (1.7.10)
http-cookie (1.0.5)
domain_name (~> 0.5)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
inflection (1.0.0)
ipaddress (0.8.3)
jmespath (1.6.1)
json (2.6.2)
jwt (2.4.1)
memcache-client (1.8.5)
mini_mime (1.1.2)
mini_portile2 (2.8.0)
minitest (5.16.2)
ms_rest (0.7.6)
concurrent-ruby (~> 1.0)
faraday (>= 0.9, < 2.0.0)
timeliness (~> 0.3.10)
ms_rest_azure (0.12.0)
concurrent-ruby (~> 1.0)
faraday (>= 0.9, < 2.0.0)
faraday-cookie_jar (~> 0.0.6)
ms_rest (~> 0.7.6)
multipart-post (2.2.3)
mustermann (2.0.2)
ruby2_keywords (~> 0.0.1)
mysql2 (0.5.4)
net-ldap (0.17.1)
nokogiri (1.13.8)
mini_portile2 (~> 2.8.0)
racc (~> 1.4)
openssl (3.0.0)
openssl-signature_algorithm (1.2.1)
openssl (> 2.0, < 3.1)
optimist (3.0.1)
ox (2.14.11)
parse-cron (0.1.4)
pg (1.4.1)
polyglot (0.3.5)
public_suffix (5.0.0)
racc (1.6.0)
rack (2.2.4)
rack-protection (2.2.2)
rack
rbvmomi (3.0.0)
builder (~> 3.2)
json (~> 2.3)
nokogiri (~> 1.10)
optimist (~> 3.0)
rchardet (1.8.0)
rexml (3.2.5)
rotp (6.2.0)
rqrcode (2.1.2)
chunky_png (~> 1.0)
rqrcode_core (~> 1.0)
rqrcode_core (1.2.0)
ruby2_keywords (0.0.5)
safety_net_attestation (0.4.0)
jwt (~> 2.0)
sequel (5.58.0)
sinatra (2.2.2)
mustermann (~> 2.0)
rack (~> 2.2)
rack-protection (= 2.2.2)
tilt (~> 2.0)
sqlite3 (1.4.4)
thin (1.8.1)
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thread_safe (0.3.6)
tilt (2.0.11)
timeliness (0.3.10)
tpm-key_attestation (0.11.0)
bindata (~> 2.4)
openssl (> 2.0, < 3.1)
openssl-signature_algorithm (~> 1.0)
treetop (1.6.11)
polyglot (~> 0.3)
tzinfo (1.2.10)
thread_safe (~> 0.1)
unf (0.1.4)
unf_ext
unf_ext (0.0.8.2)
uuidtools (2.2.0)
vsphere-automation-cis (0.4.7)
vsphere-automation-runtime (~> 0.4.6)
vsphere-automation-runtime (0.4.7)
vsphere-automation-vcenter (0.4.7)
vsphere-automation-cis (~> 0.4.6)
vsphere-automation-runtime (~> 0.4.6)
webauthn (2.5.2)
android_key_attestation (~> 0.3.0)
awrence (~> 1.1)
bindata (~> 2.4)
cbor (~> 0.5.9)
cose (~> 1.1)
openssl (>= 2.2, < 3.1)
safety_net_attestation (~> 0.4.0)
tpm-key_attestation (~> 0.11.0)
webrick (1.7.0)
xmlrpc (0.3.2)
webrick
zendesk_api (1.36.0)
faraday (>= 0.9.0, < 2.0.0)
hashie (>= 3.5.2, < 6.0.0)
inflection
mini_mime
multipart-post (~> 2.0)
PLATFORMS
ruby
DEPENDENCIES
activesupport (~> 4.2)
addressable
augeas (~> 0.6)
aws-sdk-cloudwatch
aws-sdk-ec2 (>= 1.151)
aws-sdk-s3
azure_mgmt_compute
azure_mgmt_monitor
azure_mgmt_network
azure_mgmt_resources
azure_mgmt_storage
configparser
curb
dalli (< 3.0)
faraday_middleware (~> 1.2.0)
ffi-rzmq (~> 2.0.7)
git (~> 1.5)
gnuplot
highline (~> 1.7)
i18n (~> 0.9)
ipaddress (~> 0.8.3)
json (>= 2.0)
memcache-client
minitest
mysql2
net-ldap
nokogiri
ox
parse-cron
pg
public_suffix
rack
rbvmomi (~> 3.0.0)
rexml
rotp
rqrcode
sequel
sinatra
sqlite3
thin
treetop (>= 1.6.3)
uuidtools
vsphere-automation-cis (~> 0.4.6)
vsphere-automation-vcenter (~> 0.4.6)
webauthn
xmlrpc
zendesk_api
RUBY VERSION
ruby 3.0.3p157
BUNDLED WITH
1.17.3


@ -83,6 +83,7 @@ elif command -v rpm >/dev/null; then
if command -v dnf >/dev/null; then
dnf -q -y install 'dnf-command(config-manager)'
dnf config-manager --set-enabled powertools || /bin/true
dnf config-manager --set-enabled crb || /bin/true # alma9
fi
yum -q -y install rubygems findutils


@ -405,7 +405,6 @@ AllCops:
- src/cloud/common/CloudAuth.rb
- src/cloud/common/CloudAuth/RemoteCloudAuth.rb
- src/cloud/common/CloudAuth/X509CloudAuth.rb
- src/cloud/common/CloudAuth/EC2CloudAuth.rb
- src/cloud/common/CloudAuth/SunstoneCloudAuth.rb
- src/cloud/common/CloudAuth/OneGateCloudAuth.rb
- src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb


@ -339,7 +339,7 @@ CommandParser::CmdParser.new(ARGV) do
rescue StandardError => e
STDERR.puts e
end
helper.perform_actions(args[0], options, 'flush') do |host|
helper.perform_actions(args[0], options, 'flushing') do |host|
host.flush action
end
end


@ -234,7 +234,7 @@ CommandParser::CmdParser.new(ARGV) do
command :clone, clone_desc, :imageid, :name,
:options => [OneDatastoreHelper::DATASTORE] do
helper.perform_action(args[0], options, 'cloned') do |image|
helper.perform_action(args[0], options, 'cloning') do |image|
ds_id = options[:datastore] || -1 # -1 clones to self
res = image.clone(args[1], ds_id)
@ -252,7 +252,7 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :delete, delete_desc, [:range, :imageid_list] do
helper.perform_actions(args[0], options, 'deleted') do |image|
helper.perform_actions(args[0], options, 'deleting') do |image|
image.delete
end
end
@ -382,7 +382,7 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :'snapshot-delete', snapshot_delete_desc, :imageid, :snapshot_id do
helper.perform_action(args[0], options, 'snapshot deleted') do |o|
helper.perform_action(args[0], options, 'deleting snapshot') do |o|
o.snapshot_delete(args[1].to_i)
end
end
@ -392,7 +392,7 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :'snapshot-revert', snapshot_revert_desc, :imageid, :snapshot_id do
helper.perform_action(args[0], options, 'image state reverted') do |o|
helper.perform_action(args[0], options, 'reverting image state') do |o|
o.snapshot_revert(args[1].to_i)
end
end
@ -405,7 +405,7 @@ CommandParser::CmdParser.new(ARGV) do
snapshot_flatten_desc,
:imageid,
:snapshot_id do
helper.perform_action(args[0], options, 'snapshot flattened') do |o|
helper.perform_action(args[0], options, 'flattening snapshot') do |o|
o.snapshot_flatten(args[1].to_i)
end
end


@ -284,7 +284,7 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :export, export_desc, :appid, :name, :options => EXPORT_OPTIONS do
helper.perform_action(args[0], options, 'exported') do |obj|
helper.perform_action(args[0], options, 'exporting') do |obj|
tag ="tag=#{options[:tag]}" if options[:tag]
obj.extend(MarketPlaceAppExt)
@ -325,7 +325,7 @@ CommandParser::CmdParser.new(ARGV) do
command :download, download_desc, :appid, :path,
:options => [OpenNebulaHelper::FORCE] do
helper.perform_action(args[0], options, 'downloaded') do
helper.perform_action(args[0], options, 'downloading') do
download_args = [:marketplaceapp, args[0], args[1], options[:force]]
OpenNebulaHelper.download_resource_sunstone(*download_args)
end


@ -517,7 +517,7 @@ CommandParser::CmdParser.new(ARGV) do
if !options[:schedule].nil?
helper.schedule_actions(args[0], options, command_name)
else
helper.perform_actions(args[0], options, 'terminated') do |vm|
helper.perform_actions(args[0], options, 'terminating') do |vm|
vm.terminate(options[:hard] == true)
end
end
@ -833,7 +833,7 @@ CommandParser::CmdParser.new(ARGV) do
template << ' ]'
end
helper.perform_action(args[0], options, 'Attach disk') do |vm|
helper.perform_action(args[0], options, 'Attaching disk') do |vm|
vm.disk_attach(template)
end
end
@ -847,7 +847,7 @@ CommandParser::CmdParser.new(ARGV) do
command :"disk-detach", disk_detach_desc, :vmid, :diskid do
diskid = args[1].to_i
helper.perform_action(args[0], options, 'Detach disk') do |vm|
helper.perform_action(args[0], options, 'Detaching disk') do |vm|
vm.disk_detach(diskid)
end
end
@ -910,7 +910,7 @@ CommandParser::CmdParser.new(ARGV) do
end
end
helper.perform_action(args[0], options, 'Attach NIC') do |vm|
helper.perform_action(args[0], options, 'Attaching NIC') do |vm|
vm.nic_attach(template)
end
end
@ -924,7 +924,7 @@ CommandParser::CmdParser.new(ARGV) do
command :"nic-detach", nic_detach_desc, :vmid, :nicid do
nicid = args[1].to_i
helper.perform_action(args[0], options, 'Detach NIC') do |vm|
helper.perform_action(args[0], options, 'Detaching NIC') do |vm|
vm.nic_detach(nicid)
end
end
@ -939,7 +939,7 @@ CommandParser::CmdParser.new(ARGV) do
nic_id = args[1].to_i
sg_id = args[2].to_i
helper.perform_action(args[0], options, 'Attach SG') do |vm|
helper.perform_action(args[0], options, 'Attaching SG') do |vm|
vm.sg_attach(nic_id, sg_id)
end
end
@ -954,7 +954,7 @@ CommandParser::CmdParser.new(ARGV) do
nic_id = args[1].to_i
sg_id = args[2].to_i
helper.perform_action(args[0], options, 'Detach SG') do |vm|
helper.perform_action(args[0], options, 'Detaching SG') do |vm|
vm.sg_detach(nic_id, sg_id)
end
end
@ -1047,7 +1047,7 @@ CommandParser::CmdParser.new(ARGV) do
helper.schedule_actions(args[0], options, @comm_name)
else
helper.perform_actions(args[0], options, 'snapshot created') do |o|
helper.perform_actions(args[0], options, 'creating snapshot') do |o|
o.snapshot_create(args[1])
end
end
@ -1071,7 +1071,7 @@ CommandParser::CmdParser.new(ARGV) do
helper.schedule_actions([args[0]], options, @comm_name)
else
helper.perform_action(args[0], options, 'snapshot reverted') do |o|
helper.perform_action(args[0], options, 'reverting snapshot') do |o|
o.snapshot_revert(args[1].to_i)
end
end
@ -1095,7 +1095,7 @@ CommandParser::CmdParser.new(ARGV) do
helper.schedule_actions([args[0]], options, @comm_name)
else
helper.perform_action(args[0], options, 'snapshot deleted') do |o|
helper.perform_action(args[0], options, 'deleting snapshot') do |o|
o.snapshot_delete(args[1])
end
end
@ -1123,7 +1123,7 @@ CommandParser::CmdParser.new(ARGV) do
helper.schedule_actions([args[0]], options, @comm_name)
else
helper.perform_action(args[0], options,
'disk snapshot created') do |o|
'creating disk snapshot') do |o|
o.disk_snapshot_create(args[1].to_i, args[2])
end
end
@ -1150,7 +1150,7 @@ CommandParser::CmdParser.new(ARGV) do
helper.schedule_actions([args[0]], options, @comm_name)
else
helper.perform_action(args[0], options,
'disk snapshot reverted') do |o|
'reverting disk snapshot') do |o|
o.disk_snapshot_revert(args[1].to_i, args[2].to_i)
end
end
@ -1177,7 +1177,7 @@ CommandParser::CmdParser.new(ARGV) do
helper.schedule_actions([args[0]], options, @comm_name)
else
helper.perform_action(args[0], options,
'disk snapshot deleted') do |o|
'deleting disk snapshot') do |o|
o.disk_snapshot_delete(args[1].to_i, args[2].to_i)
end
end
@ -1213,7 +1213,7 @@ CommandParser::CmdParser.new(ARGV) do
command :"disk-resize", disk_resize_desc,
:vmid, :diskid, :size do
helper.perform_action(args[0], options, 'disk resized') do |o|
helper.perform_action(args[0], options, 'resizing disk') do |o|
o.info
size = o["/VM/TEMPLATE/DISK[DISK_ID='#{args[1]}']/SIZE"].to_i


@ -176,7 +176,7 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :delete, delete_desc, [:range, :vnetid_list] do
helper.perform_actions(args[0], options, 'deleted') do |vn|
helper.perform_actions(args[0], options, 'deleting') do |vn|
vn.delete
end
end
@ -187,7 +187,7 @@ CommandParser::CmdParser.new(ARGV) do
command :addar, addar_desc, :vnetid, [:file, nil],
:options => STD_OPTIONS + OneVNetHelper::ADDAR_OPTIONS do
helper.perform_action(args[0], options, 'lease added') do |vn|
helper.perform_action(args[0], options, 'address range added') do |vn|
if args[1]
ar = File.read(args[1])
else


@ -19,9 +19,7 @@ require 'thread'
class CloudAuth
# These are the authentication methods for the user requests
AUTH_MODULES = {
"occi" => 'OCCICloudAuth',
"sunstone" => 'SunstoneCloudAuth' ,
"ec2" => 'EC2CloudAuth',
"x509" => 'X509CloudAuth',
"remote" => 'RemoteCloudAuth',
"opennebula" => 'OpenNebulaCloudAuth',


@ -17,18 +17,41 @@ clean() {
rm -rf ./dist ./node_modules
}
openssl_legacy() {
cat > /tmp/openssl.conf <<EOF
openssl_conf = openssl_init
[openssl_init]
providers = provider_sect
[provider_sect]
default = default_sect
legacy = legacy_sect
[default_sect]
activate = 1
[legacy_sect]
activate = 1
EOF
export OPENSSL_CONF="/tmp/openssl.conf"
}
dependencies() {
npm i --production
}
install() {
clean
openssl_legacy
dependencies
npm run build
}
install_enterprise() {
clean
openssl_legacy
dependencies
# npm run build-enterprise
npm run build
@ -76,4 +99,5 @@ if [ "$ENTERPRISE" = "yes" ]; then
exit 0
fi
install
install

File diff suppressed because it is too large


@ -69,6 +69,7 @@
"babel-loader": "8.2.1",
"babel-plugin-module-resolver": "4.0.0",
"btoa": "1.2.1",
"chartist": "0.10.1",
"clsx": "1.1.1",
"compression": "1.7.4",
"copy-webpack-plugin": "9.0.1",
@ -108,6 +109,7 @@
"qrcode": "1.4.4",
"react": "17.0.2",
"react-beautiful-dnd": "13.1.0",
"react-chartist": "0.14.4",
"react-dom": "17.0.2",
"react-flow-renderer": "9.6.0",
"react-hook-form": "7.18.1",


@ -0,0 +1,169 @@
/* ------------------------------------------------------------------------- *
* Copyright 2002-2022, OpenNebula Project, OpenNebula Systems *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain *
* a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* ------------------------------------------------------------------------- */
import { JSXElementConstructor, useMemo } from 'react'
import PropTypes from 'prop-types'
import 'chartist/dist/chartist.min.css'
import {
Grid,
CircularProgress,
Stack,
Paper,
List,
ListItem,
Typography,
} from '@mui/material'
import ChartistGraph from 'react-chartist'
import { FixedScaleAxis } from 'chartist'
import makeStyles from '@mui/styles/makeStyles'
const useStyles = makeStyles(({ palette, typography }) => ({
graphStyle: {
'& .ct-series-a .ct-bar, .ct-series-a .ct-line, .ct-series-a .ct-point, .ct-series-a .ct-slice-donut':
{ stroke: palette.secondary.main, strokeWidth: '1px' },
'& .ct-grid': {
stroke: 'rgba(150,150,150,.1)',
strokeDasharray: '1px',
},
'&': {
width: '100%',
},
},
box: {
paddingBottom: '0px',
},
title: {
fontWeight: typography.fontWeightBold,
borderBottom: `1px solid ${palette.divider}`,
},
}))
/**
* Represents a Chartist Graph.
*
* @param {object} props - Props
* @param {object[]} props.data - Chart data
* @param {Function} props.interpolationX - Label interpolation function for the X axis
* @param {Function} props.interpolationY - Label interpolation function for the Y axis
* @param {string} props.name - Chart name
* @param {string[]} props.filter - Data point keys used to select monitoring records
* @param {string} props.x - Data point key plotted on the X axis
* @param {string} props.y - Data point key plotted on the Y axis
* @returns {JSXElementConstructor} Chartist component
*/
const Chartist = ({
data = [],
interpolationX,
interpolationY,
name = '',
filter = [],
x = '',
y = '',
}) => {
const classes = useStyles()
const chartOptions = {
fullWidth: true,
reverseData: true,
low: 0,
scaleMinSpace: 10,
axisX: {
type: FixedScaleAxis,
divisor: 10,
},
axisY: {
offset: 70,
},
}
const dataChart = {
name,
}
typeof interpolationX === 'function' &&
(chartOptions.axisX.labelInterpolationFnc = interpolationX)
typeof interpolationY === 'function' &&
(chartOptions.axisY.labelInterpolationFnc = interpolationY)
filter?.length &&
(dataChart.data = useMemo(
() =>
data
?.filter((point) =>
Object.keys(point).find((key) => filter.includes(key))
)
.map((point) => ({
x: +point[x],
y: +point[y],
})),
[data]
))
return (
<Grid item xs={12} sm={6}>
{!data?.length ? (
<Stack direction="row" justifyContent="center" alignItems="center">
<CircularProgress color="secondary" />
</Stack>
) : (
<Paper variant="outlined" sx={{ height: 'fit-content' }}>
<List className={classes.box}>
<ListItem className={classes.title}>
<Typography noWrap>{name}</Typography>
</ListItem>
<ListItem className={classes.title}>
<ChartistGraph
className={classes.graphStyle}
data={{ series: [dataChart] }}
options={chartOptions}
type="Line"
/>
</ListItem>
</List>
</Paper>
)}
</Grid>
)
}
Chartist.propTypes = {
name: PropTypes.string,
filter: PropTypes.arrayOf(PropTypes.string),
data: PropTypes.arrayOf(
PropTypes.shape({
TIMESTAMP: PropTypes.string,
DISK_SIZE: PropTypes.arrayOf(PropTypes.shape({})),
ID: PropTypes.string,
CPU: PropTypes.string,
DISKRDBYTES: PropTypes.string,
DISKRDIOPS: PropTypes.string,
DISKWRBYTES: PropTypes.string,
DISKWRIOPS: PropTypes.string,
MEMORY: PropTypes.string,
NETRX: PropTypes.string,
NETTX: PropTypes.string,
})
),
x: PropTypes.string,
y: PropTypes.string,
interpolationX: PropTypes.func,
interpolationY: PropTypes.func,
}
Chartist.displayName = 'Chartist'
export default Chartist


@ -15,5 +15,6 @@
* ------------------------------------------------------------------------- */
import CircleChart from 'client/components/Charts/CircleChart'
import SingleBar from 'client/components/Charts/SingleBar'
import Chartist from 'client/components/Charts/Chartist'
export { CircleChart, SingleBar }
export { CircleChart, SingleBar, Chartist }


@ -21,7 +21,6 @@ import { T, INPUT_TYPES, VN_DRIVERS } from 'client/constants'
const {
fw,
ebtables,
dot1Q,
vxlan,
ovswitch,
@ -60,7 +59,7 @@ const FILTER_MAC_SPOOFING_FIELD = {
name: 'FILTER_MAC_SPOOFING',
label: T.MacSpoofingFilter,
type: INPUT_TYPES.SWITCH,
onlyOnHypervisors: [fw, ebtables],
onlyOnHypervisors: [fw],
validation: boolean().yesOrNo(),
grid: { md: 12 },
}
@ -70,7 +69,7 @@ const FILTER_IP_SPOOFING_FIELD = {
name: 'FILTER_IP_SPOOFING',
label: T.IpSpoofingFilter,
type: INPUT_TYPES.SWITCH,
onlyOnHypervisors: [fw, ebtables],
onlyOnHypervisors: [fw],
validation: boolean().yesOrNo(),
grid: { md: 12 },
}


@ -303,11 +303,13 @@ const EnhancedTable = ({
setFilter(LABEL_COLUMN_ID, nextFilter)
},
})}
onClick={() => {
onClick={(e) => {
typeof onRowClick === 'function' && onRowClick(original)
if (!disableRowSelect) {
singleSelect && toggleAllRowsSelected?.(false)
singleSelect ||
(!(e.ctrlKey || e.metaKey) &&
toggleAllRowsSelected?.(false))
toggleRowSelected?.(!isSelected)
}
}}


@ -0,0 +1,76 @@
/* ------------------------------------------------------------------------- *
* Copyright 2002-2022, OpenNebula Project, OpenNebula Systems *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain *
* a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* ------------------------------------------------------------------------- */
import { ReactElement } from 'react'
import { Grid } from '@mui/material'
import makeStyles from '@mui/styles/makeStyles'
import PropTypes from 'prop-types'
import { DateTime } from 'luxon'
import { useGetMonitoringQuery } from 'client/features/OneApi/vm'
import { Chartist } from 'client/components/Charts'
import { Tr } from 'client/components/HOC'
import { prettyBytes } from 'client/utils'
import { T } from 'client/constants'
const useStyles = makeStyles({
container: {
gridColumn: '1 / -1',
},
})
const interpolationX = (value) => DateTime.fromMillis(value).toFormat('HH:mm')
/**
* Render capacity graphs.
*
* @param {object} props - Props
* @param {string} props.id - Virtual machine id
* @returns {ReactElement} Capacity Graphs.
*/
const Graphs = ({ id }) => {
const classes = useStyles()
const { data: monitoring = [] } = useGetMonitoringQuery(id)
return (
<Grid container spacing={1} className={classes.container}>
<Chartist
name={Tr(T.RealCpu)}
filter={['CPU']}
data={monitoring}
y="CPU"
x="TIMESTAMP"
interpolationX={interpolationX}
/>
<Chartist
name={Tr(T.RealMemory)}
filter={['MEMORY']}
data={monitoring}
y="MEMORY"
x="TIMESTAMP"
interpolationY={(value) => prettyBytes(value)}
interpolationX={interpolationX}
/>
</Grid>
)
}
Graphs.propTypes = {
id: PropTypes.string,
}
Graphs.displayName = 'Graphs'
export default Graphs


@ -31,6 +31,7 @@ import {
} from 'client/components/Tabs/Common'
import Information from 'client/components/Tabs/Vm/Info/information'
import Capacity from 'client/components/Tabs/Vm/Info/capacity'
import Graphs from 'client/components/Tabs/Vm/Info/graphs'
import { SubmitButton } from 'client/components/FormControl'
import { Tr, Translate } from 'client/components/HOC'
@ -183,7 +184,10 @@ const VmInfoTab = ({ tabProps = {}, id }) => {
/>
)}
{capacityPanel?.enabled && (
<Capacity actions={getActions(capacityPanel?.actions)} vm={vm} />
<>
<Capacity actions={getActions(capacityPanel?.actions)} vm={vm} />
<Graphs id={id} />
</>
)}
{attributesPanel?.enabled && attributes && (
<AttributePanel


@ -0,0 +1,89 @@
/* ------------------------------------------------------------------------- *
* Copyright 2002-2022, OpenNebula Project, OpenNebula Systems *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain *
* a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* ------------------------------------------------------------------------- */
import { ReactElement } from 'react'
import { Grid } from '@mui/material'
import PropTypes from 'prop-types'
import { DateTime } from 'luxon'
import { useGetMonitoringQuery } from 'client/features/OneApi/vm'
import { Chartist } from 'client/components/Charts'
import { Tr } from 'client/components/HOC'
import { T } from 'client/constants'
const interpolationHour = (value) =>
DateTime.fromMillis(value).toFormat('HH:mm')
const interpolationBytesSeg = (value) => `${value}B/s`
const interpolationY = (value) => `${value}B`
/**
* Render network graphs.
*
* @param {object} props - Props
* @param {string} props.id - Virtual machine id
* @returns {ReactElement} Network graphs.
*/
const Graphs = ({ id }) => {
const { data: monitoring = [] } = useGetMonitoringQuery(id)
return (
<Grid container spacing={1}>
<Chartist
name={Tr(T.NetRX)}
filter={['NETRX']}
data={monitoring}
y="NETRX"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={interpolationY}
/>
<Chartist
name={Tr(T.NetTX)}
filter={['NETTX']}
data={monitoring}
y="NETTX"
x="TIMESTAMP"
interpolationY={interpolationY}
interpolationX={interpolationHour}
/>
<Chartist
name={Tr(T.NetDownloadSpeed)}
filter={['NETRX']}
data={monitoring}
y="NETRX"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={interpolationBytesSeg}
/>
<Chartist
name={Tr(T.NetUploadSpeed)}
filter={['NETTX']}
data={monitoring}
y="NETTX"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={interpolationBytesSeg}
/>
</Grid>
)
}
Graphs.propTypes = {
id: PropTypes.string,
}
Graphs.displayName = 'Graphs'
export default Graphs


@ -25,7 +25,7 @@ import {
AttachSecGroupAction,
DetachSecGroupAction,
} from 'client/components/Tabs/Vm/Network/Actions'
import Graphs from 'client/components/Tabs/Vm/Network/Graphs'
import {
getNics,
getHypervisor,
@ -106,6 +106,7 @@ const VmNetworkTab = ({ tabProps: { actions } = {}, id }) => {
)
})}
</Stack>
<Graphs id={id} />
</div>
)
}


@ -0,0 +1,90 @@
/* ------------------------------------------------------------------------- *
* Copyright 2002-2022, OpenNebula Project, OpenNebula Systems *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain *
* a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* ------------------------------------------------------------------------- */
import { ReactElement } from 'react'
import { Grid } from '@mui/material'
import PropTypes from 'prop-types'
import { DateTime } from 'luxon'
import { useGetMonitoringQuery } from 'client/features/OneApi/vm'
import { Chartist } from 'client/components/Charts'
import { Tr } from 'client/components/HOC'
import { prettyBytes } from 'client/utils'
import { T } from 'client/constants'
const interpolationHour = (value) =>
DateTime.fromMillis(value).toFormat('HH:mm')
const interpolationBytes = (value) => prettyBytes(value)
const interpolationY = (value) => (+value * 100).toFixed() / 100
/**
* Render storage graphs.
*
* @param {object} props - Props
* @param {string} props.id - Virtual machine id
* @returns {ReactElement} Storage graphs.
*/
const Graphs = ({ id }) => {
const { data: monitoring = [] } = useGetMonitoringQuery(id)
return (
<Grid container spacing={1}>
<Chartist
name={Tr(T.DiskReadBytes)}
filter={['DISKRDBYTES']}
data={monitoring}
y="DISKRDBYTES"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={interpolationBytes}
/>
<Chartist
name={Tr(T.DiskWriteBytes)}
filter={['DISKWRBYTES']}
data={monitoring}
y="CDISKWRBYTES"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={interpolationY}
/>
<Chartist
name={Tr(T.DiskReadIOPS)}
filter={['DISKRDIOPS']}
data={monitoring}
y="DISKRDIOPS"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={(value) => value / 1000}
/>
<Chartist
name={Tr(T.DiskWriteIOPS)}
filter={['DISKWRIOPS']}
data={monitoring}
y="DISKWRIOPS"
x="TIMESTAMP"
interpolationX={interpolationHour}
interpolationY={interpolationY}
/>
</Grid>
)
}
Graphs.propTypes = {
id: PropTypes.string,
}
Graphs.displayName = 'Graphs'
export default Graphs


@ -29,6 +29,7 @@ import {
SnapshotRenameAction,
SnapshotDeleteAction,
} from 'client/components/Tabs/Vm/Storage/Actions'
import Graphs from 'client/components/Tabs/Vm/Storage/Graphs'
import {
getDisks,
@ -134,6 +135,7 @@ const VmStorageTab = ({ tabProps: { actions } = {}, id }) => {
)
})}
</Stack>
<Graphs id={id} />
</div>
)
}


@ -95,11 +95,6 @@ import * as ACTIONS from 'client/constants/actions'
* @property {string} [TEMPLATE.NETWORK_ADDRESS] - Network address
* @property {string} [TEMPLATE.NETWORK_MASK] - Network mask
* @property {string} [TEMPLATE.SEARCH_DOMAIN] - Domain
* @property {string} [TEMPLATE.VCENTER_FROM_WILD] - vCenter information
* @property {string} [TEMPLATE.VCENTER_INSTANCE_ID] - vCenter information
* @property {string} [TEMPLATE.VCENTER_NET_REF] - vCenter information
* @property {string} [TEMPLATE.VCENTER_PORTGROUP_TYPE] - vCenter information
* @property {string} [TEMPLATE.VCENTER_TEMPLATE_REF] - vCenter information
*/
/** @type {STATES.StateInfo[]} Virtual Network states */
@ -208,12 +203,10 @@ export const AR_TYPES = {
export const VN_DRIVERS = {
bridge: 'bridge',
fw: 'fw',
ebtables: 'ebtables',
dot1Q: '802.1Q',
vxlan: 'vxlan',
ovswitch: 'ovswitch',
ovswitch_vxlan: 'ovswitch_vxlan',
vcenter: 'vcenter',
elastic: 'elastic',
nodeport: 'nodeport',
}
@ -236,12 +229,10 @@ export const VNET_METHODS6 = {
export const VN_DRIVERS_STR = {
[VN_DRIVERS.bridge]: 'Bridged',
[VN_DRIVERS.fw]: 'Bridged & Security Groups',
[VN_DRIVERS.ebtables]: 'Bridged & ebtables VLAN',
[VN_DRIVERS.dot1Q]: '802.1Q',
[VN_DRIVERS.vxlan]: 'VXLAN',
[VN_DRIVERS.ovswitch]: 'Open vSwitch',
[VN_DRIVERS.ovswitch_vxlan]: 'Open vSwitch - VXLAN',
[VN_DRIVERS.vcenter]: 'vCenter',
}
/**


@ -824,6 +824,15 @@ module.exports = {
Swedish: 'Swedish',
Thai: 'Thai',
Turkish: 'Turkish',
/* VM graphs */
DiskReadBytes: 'Disk read bytes',
DiskWriteBytes: 'Disk write bytes',
DiskReadIOPS: 'Disk read IOPS',
DiskWriteIOPS: 'Disk write IOPS',
NetRX: 'Net RX',
NetTX: 'Net TX',
NetDownloadSpeed: 'Net download speed',
NetUploadSpeed: 'Net upload speed',
/* VM Template schema - Input/Output - graphics - Remote connections */
DisplayUpdate: 'Display update',
/* VM Template schema - NUMA */


@ -173,6 +173,8 @@ const vmApi = oneApi.injectEndpoints({
return { params: { id }, command }
},
transformResponse: (data) =>
[data?.MONITORING_DATA?.MONITORING ?? []].flat(),
}),
getMonitoringPool: builder.query({
/**


@ -81,8 +81,9 @@ func (p *Pair) String() string {
} else {
buf := bytes.NewBufferString(p.XMLName.Local)
buf.WriteString("=\"")
buf.WriteString(strings.ReplaceAll(p.Value, `"`, `\"`))
buf.WriteString(strings.ReplaceAll(p.Value, `\`, `\\`))
newValue := strings.ReplaceAll(p.Value, `"`, `\"`)
newValue = strings.ReplaceAll(newValue, `\`, `\\`)
buf.WriteString(newValue)
buf.WriteByte('"')
return buf.String()


@ -187,13 +187,15 @@ class OneServer(xmlrpc.client.ServerProxy):
Slightly tuned ServerProxy
"""
def __init__(self, uri, session, timeout=None, **options):
def __init__(self, uri, session, timeout=None, https_verify=True,
**options):
"""
Override the constructor to take the authentication or session
Will also configure the socket timeout
:param uri: OpenNebula endpoint
:param session: OpenNebula authentication session
:param timeout: Socket timeout
:param https_verify: whether the HTTPS certificate should be verified
:param options: additional options for ServerProxy
"""
@ -209,6 +211,7 @@ class OneServer(xmlrpc.client.ServerProxy):
transport = RequestsTransport()
transport.set_https(uri.startswith('https'))
transport.set_https_verify(https_verify)
xmlrpc.client.ServerProxy.__init__(
self,
@ -309,6 +312,9 @@ class RequestsTransport(xmlrpc.client.Transport):
def set_https(self, https=False):
self.use_https = https
def set_https_verify(self, https_verify):
self.https_verify = https_verify
def request(self, host, handler, request_body, verbose=False):
"""
Make an xmlrpc request.
@ -320,7 +326,7 @@ class RequestsTransport(xmlrpc.client.Transport):
url = self._build_url(host, handler)
kwargs = {'verify': True}
kwargs = {'verify': self.https_verify }
resp = requests.post(url, data=request_body, headers=headers,
**kwargs)
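
The hunk above adds an https_verify switch to the Python XML-RPC client so certificate checking can be turned off against self-signed endpoints. A minimal usage sketch, assuming the patched class is the OneServer published in the pyone package; the endpoint and credentials below are placeholders, not values from the diff:

import pyone

# Sketch only: connect to a frontend that presents a self-signed certificate.
one = pyone.OneServer(
    "https://one.example.test:2633/RPC2",   # placeholder endpoint
    session="oneadmin:opennebula",          # placeholder credentials
    timeout=30,
    https_verify=False,  # stored via set_https_verify() and passed to requests as verify=
)

print(one.system.version())  # any call is routed through RequestsTransport.request()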


@ -41,7 +41,7 @@ module OneCfg::Config::Type
def load(name = @name)
reset
@content = YAML.load_file(name)
@content = YAML.load_file(name, :aliases => true)
@content
end


@ -215,6 +215,12 @@
group: oneadmin
mode: '0640'
- name: /etc/one/guacd
class: Augeas::Shell
owner: root
group: oneadmin
mode: '0640'
- name: /etc/one/vcenter_driver.conf
class: Yaml
owner: root


@ -60,7 +60,6 @@ UserPool::UserPool(SqlDB * db, time_t __session_expiration_time, bool is_slave,
{
int one_uid = -1;
int server_uid = -1;
int i;
ostringstream oss;
@ -72,11 +71,16 @@ UserPool::UserPool(SqlDB * db, time_t __session_expiration_time, bool is_slave,
set<int> gids;
set<int> agids;
string filenames[5];
string error_str;
string error_str;
Nebula& nd = Nebula::instance();
vector<string> filenames = {
nd.get_var_location() + "/.one/sunstone_auth",
nd.get_var_location() + "/.one/onegate_auth",
nd.get_var_location() + "/.one/oneflow_auth"
};
_session_expiration_time = __session_expiration_time;
// Set restricted attributes
@ -126,32 +130,31 @@ UserPool::UserPool(SqlDB * db, time_t __session_expiration_time, bool is_slave,
random = one_util::random_password();
filenames[0] = nd.get_var_location() + "/.one/sunstone_auth";
filenames[1] = nd.get_var_location() + "/.one/occi_auth";
filenames[2] = nd.get_var_location() + "/.one/ec2_auth";
filenames[3] = nd.get_var_location() + "/.one/onegate_auth";
filenames[4] = nd.get_var_location() + "/.one/oneflow_auth";
mkdir(string(nd.get_var_location() + "/.one").c_str(), S_IRWXU);
for (i=0 ; i < 5; i++)
for (const auto& file : filenames)
{
struct stat file_stat;
if ( stat(filenames[i].c_str(), &file_stat) == 0 )
if ( stat(file.c_str(), &file_stat) == 0 )
{
goto error_exists;
oss << "Password file " << file << " already exists "
<< "but OpenNebula is boostraping the database. Check your "
<< "database configuration in oned.conf.";
goto error_common;
}
int cfile = creat(filenames[i].c_str(), S_IRUSR | S_IWUSR);
int cfile = creat(file.c_str(), S_IRUSR | S_IWUSR);
close(cfile);
ofstream ofile;
ofile.open(filenames[i].c_str(), ios::out | ios::trunc);
ofile.open(file.c_str(), ios::out | ios::trunc);
if ( !ofile.is_open() )
{
goto error_no_open;
oss << "Could not create configuration file "<< file;
goto error_common;
}
ofile << SERVER_NAME << ":" << random << endl;
@ -204,16 +207,6 @@ error_one_name:
oss << "The name '" << SERVER_NAME << "' is reserved";
goto error_common;
error_no_open:
oss << "Could not create configuration file "<< filenames[i];
goto error_common;
error_exists:
oss << "Password file " << filenames[i] << " already exists "
<< "but OpenNebula is boostraping the database. Check your "
<< "database configuration in oned.conf.";
goto error_common;
error_oneadmin:
oss << "Error creating oneadmin user: " << error_str;
goto error_common;
@ -596,13 +589,18 @@ static int parse_auth_msg(
int tmp_gid;
bool gr_admin = false;
char c = is.peek();
char c;
is >> c;
if ( c == '*' )
{
is.get(c);
gr_admin = true;
}
else
{
is.unget();
}
is >> tmp_gid;


@ -998,18 +998,27 @@ void VirtualMachinePool::delete_attach_disk(std::unique_ptr<VirtualMachine> vm)
update(vm.get());
vm.reset();
if ( disk == nullptr )
{
return;
}
// skip the image release if another disk is using the same image
int image_id;
set<int> image_ids;
disk->vector_value("IMAGE_ID", image_id);
vm->get_disks().get_image_ids(image_ids, uid);
bool do_image_release = image_ids.count(image_id) == 0;
vm.reset();
Nebula& nd = Nebula::instance();
ImageManager* imagem = nd.get_imagem();
Template tmpl;
int image_id;
tmpl.set(disk->vector_attribute());
tmpl.add("VMS", 0);
@ -1020,8 +1029,6 @@ void VirtualMachinePool::delete_attach_disk(std::unique_ptr<VirtualMachine> vm)
}
else
{
disk->vector_value("IMAGE_ID", image_id);
Quotas::quota_del(Quotas::IMAGE, uid, gid, &tmpl);
if (!disk->is_persistent())
@ -1040,7 +1047,10 @@ void VirtualMachinePool::delete_attach_disk(std::unique_ptr<VirtualMachine> vm)
imagem->clear_image_snapshots(image_id);
}
imagem->release_image(oid, image_id, false);
if (do_image_release)
{
imagem->release_image(oid, image_id, false);
}
}
delete disk;