1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-03-16 22:50:10 +03:00

Revert "F #4132: Redesign oneflow internal logic"

This reverts commit da969c5fd39b208072a864a76df2ffe678cb34f0.
This commit is contained in:
Ruben S. Montero 2020-02-19 14:41:58 +01:00
parent 4b17b1f6e8
commit 5e53c4f893
No known key found for this signature in database
GPG Key ID: A0CEA6FA880A1D87
35 changed files with 1905 additions and 3340 deletions

View File

@ -1945,9 +1945,7 @@ ONE_CLI_LIB_FILES="src/cli/one_helper/onegroup_helper.rb \
src/cli/one_helper/onevcenter_helper.rb \
src/cli/one_helper/onemarket_helper.rb \
src/cli/one_helper/onevntemplate_helper.rb \
src/cli/one_helper/onehook_helper.rb \
src/cli/one_helper/oneflow_helper.rb \
src/cli/one_helper/oneflowtemplate_helper.rb"
src/cli/one_helper/onehook_helper.rb"
CLI_BIN_FILES="src/cli/onevm \
src/cli/onehost \
@ -2252,8 +2250,7 @@ ONEFLOW_LIB_FILES="src/flow/lib/grammar.rb \
src/flow/lib/log.rb \
src/flow/lib/models.rb \
src/flow/lib/strategy.rb \
src/flow/lib/validator.rb \
src/flow/lib/EventManager.rb"
src/flow/lib/validator.rb"
ONEFLOW_LIB_STRATEGY_FILES="src/flow/lib/strategy/straight.rb"

View File

@ -557,7 +557,8 @@ AllCops:
- src/flow/lib/models.rb
- src/flow/lib/validator.rb
- src/flow/lib/strategy/straight.rb
- src/flow/lib/EventManager.rb
- src/flow/oneflow-server.rb
########
# LAYOUT

View File

@ -1,317 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

require 'one_helper'

# Oneflow command helper. Wraps the HTTP API exposed by the oneflow
# server and formats service information for the CLI.
class OneFlowHelper < OpenNebulaHelper::OneHelper

    # Build the HTTP client used to talk to the oneflow server.
    #
    # @param options [Hash] CLI options (:username, :password, :server)
    #
    # @return [Service::Client] client configured from the CLI options
    def client(options)
        Service::Client.new(
            :username => options[:username],
            :password => options[:password],
            :url => options[:server],
            :user_agent => USER_AGENT
        )
    end

    # Build the table used to list the service pool.
    #
    # @return [CLIHelper::ShowTable] table with ID/USER/GROUP/NAME/STATE
    def format_service_pool
        # TODO: config file
        CLIHelper::ShowTable.new(nil, self) do
            column :ID, 'ID', :size => 10 do |d|
                d['ID']
            end

            column :USER, 'Username', :left, :size => 15 do |d|
                d['UNAME']
            end

            column :GROUP, 'Group', :left, :size => 15 do |d|
                d['GNAME']
            end

            column :NAME, 'Name', :size => 25, :left => true do |d|
                d['NAME']
            end

            column :STATE, 'State', :size => 11, :left => true do |d|
                Service.state_str(d['TEMPLATE']['BODY']['state'])
            end

            default :ID, :USER, :GROUP, :NAME, :STATE
        end
    end

    # List the service pool.
    #
    # @param client  [Service::Client] Petition client
    # @param options [Hash] CLI options (:json, :done)
    #
    # @return [Integer, [Integer, String]] 0 on success, or
    #         [error_code, message] on request failure
    def list_service_pool(client, options)
        response = client.get(RESOURCE_PATH)

        if CloudClient.is_error?(response)
            [response.code.to_i, response.to_s]
        else
            array_list = JSON.parse(response.body)
            array_list = array_list['DOCUMENT_POOL']['DOCUMENT']
            # A pool with a single element is not wrapped in an Array by
            # the XML->JSON conversion; nil means the pool is empty.
            array_list = [] if array_list.nil?

            unless options.key? :done
                # remove from list flows in DONE state (state == 5)
                array_list.reject! do |value|
                    value['TEMPLATE']['BODY']['state'] == 5
                end
            end

            if options[:json]
                if array_list.empty?
                    0
                else
                    [0, JSON.pretty_generate(array_list)]
                end
            else
                format_service_pool.show(array_list)

                0
            end
        end
    end

    # List the service pool continuously.
    #
    # @param client  [Service::Client] Petition client
    # @param options [Hash] CLI options (:delay)
    #
    # @return [Integer] 0 (exits the process on error)
    def top_service_pool(client, options)
        # TODO: make default delay configurable
        options[:delay] ? delay = options[:delay] : delay = 4

        begin
            loop do
                CLIHelper.scr_cls
                CLIHelper.scr_move(0, 0)

                list_service_pool(client, options)

                sleep delay
            end
        rescue StandardError => e
            STDERR.puts e.message
            exit(-1)
        end

        0
    end

    # Show service detailed information.
    #
    # @param client  [Service::Client] Petition client
    # @param service [Integer] Service ID
    # @param options [Hash] CLI options (:json)
    #
    # @return [Integer, [Integer, String]] 0 on success, or
    #         [error_code, message] on request failure
    def format_resource(client, service, options)
        response = client.get("#{RESOURCE_PATH}/#{service}")

        if CloudClient.is_error?(response)
            [response.code.to_i, response.to_s]
        else
            if options[:json]
                [0, response.body]
            else
                str_h1 = '%-80s'

                document = JSON.parse(response.body)['DOCUMENT']
                template = document['TEMPLATE']['BODY']

                CLIHelper.print_header(
                    str_h1 % "SERVICE #{document['ID']} INFORMATION"
                )

                print_service_info(document)
                print_roles_info(template['roles'])

                # Services without log entries have no 'log' key
                return 0 unless template['log']

                CLIHelper.print_header(str_h1 % 'LOG MESSAGES', false)

                template['log'].each do |log|
                    t = Time.at(log['timestamp']).strftime('%m/%d/%y %H:%M')
                    puts "#{t} [#{log['severity']}] #{log['message']}"
                end

                0
            end
        end
    end

    private

    # Build the table used to list the VM nodes of a role.
    #
    # @return [CLIHelper::ShowTable] table with VM_ID/NAME/USER/GROUP
    def format_node_pool
        # TODO: config file
        CLIHelper::ShowTable.new(nil, self) do
            column :VM_ID,
                   'ONE identifier for Virtual Machine',
                   :size => 6 do |d|
                st = ''

                # Double-quoted so the \u escapes render the up/down
                # arrow glyphs (single quotes printed the literal text)
                if d['scale_up']
                    st << "\u2191 "
                elsif d['disposed']
                    st << "\u2193 "
                end

                if d['vm_info'].nil?
                    st << d['deploy_id'].to_s
                else
                    st << d['vm_info']['VM']['ID']
                end

                st
            end

            column :NAME,
                   'Name of the Virtual Machine',
                   :left,
                   :size => 24 do |d|
                if !d['vm_info'].nil?
                    if d['vm_info']['VM']['RESCHED'] == '1'
                        # NOTE(review): reads d['NAME'], not the nested
                        # VM name — confirm this is intentional
                        "*#{d['NAME']}"
                    else
                        d['vm_info']['VM']['NAME']
                    end
                else
                    ''
                end
            end

            column :USER,
                   'Username of the Virtual Machine owner',
                   :left,
                   :size => 15 do |d|
                if !d['vm_info'].nil?
                    d['vm_info']['VM']['UNAME']
                else
                    ''
                end
            end

            column :GROUP,
                   'Group of the Virtual Machine',
                   :left,
                   :size => 15 do |d|
                if !d['vm_info'].nil?
                    d['vm_info']['VM']['GNAME']
                else
                    ''
                end
            end

            default :VM_ID, :NAME, :USER, :GROUP
        end
    end

    # Print service general information and permissions.
    #
    # @param document [Hash] Service document information
    def print_service_info(document)
        str    = '%-20s: %-20s'
        str_h1 = '%-80s'

        template = document['TEMPLATE']['BODY']

        puts Kernel.format(str, 'ID', document['ID'])
        puts Kernel.format(str, 'NAME', document['NAME'])
        puts Kernel.format(str, 'USER', document['UNAME'])
        puts Kernel.format(str, 'GROUP', document['GNAME'])
        puts Kernel.format(str, 'STRATEGY', template['deployment'])
        puts Kernel.format(str,
                           'SERVICE STATE',
                           Service.state_str(template['state']))

        if template['shutdown_action']
            puts Kernel.format(str, 'SHUTDOWN', template['shutdown_action'])
        end

        puts

        CLIHelper.print_header(str_h1 % 'PERMISSIONS', false)

        %w[OWNER GROUP OTHER].each do |e|
            mask             = '---'
            permissions_hash = document['PERMISSIONS']

            mask[0] = 'u' if permissions_hash["#{e}_U"] == '1'
            mask[1] = 'm' if permissions_hash["#{e}_M"] == '1'
            mask[2] = 'a' if permissions_hash["#{e}_A"] == '1'

            puts Kernel.format(str, e, mask)
        end

        puts
    end

    # Print detailed information of each service role and its nodes.
    #
    # @param roles [Array] Service roles information
    def print_roles_info(roles)
        str = '%-20s: %-20s'

        roles.each do |role|
            CLIHelper.print_header("ROLE #{role['name']}", false)

            puts Kernel.format(str,
                               'ROLE STATE',
                               Role.state_str(role['state']))

            if role['parents']
                puts Kernel.format(str,
                                   'PARENTS',
                                   role['parents'].join(', '))
            end

            puts Kernel.format(str, 'VM TEMPLATE', role['vm_template'])
            puts Kernel.format(str, 'CARDINALITY', role['cardinality'])

            if role['min_vms']
                puts Kernel.format(str, 'MIN VMS', role['min_vms'])
            end

            if role['max_vms']
                puts Kernel.format(str, 'MAX VMS', role['max_vms'])
            end

            # Fixed key: the condition used to check the misspelled
            # 'coolddown' key, so this line was never printed
            if role['cooldown']
                puts Kernel.format(str, 'COOLDOWN', "#{role['cooldown']}s")
            end

            if role['shutdown_action']
                puts Kernel.format(str, 'SHUTDOWN', role['shutdown_action'])
            end

            CLIHelper.print_header('NODES INFORMATION', false)
            format_node_pool.show(role['nodes'])

            puts
        end

        puts
    end

end

View File

@ -1,145 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

require 'one_helper'

# Oneflow Template command helper. Formats service template
# information retrieved from the oneflow server for the CLI.
class OneFlowTemplateHelper < OpenNebulaHelper::OneHelper

    # Build the table used to list the service template pool.
    #
    # @return [CLIHelper::ShowTable] table with ID/USER/GROUP/NAME
    def format_service_template_pool
        # TODO: config file
        CLIHelper::ShowTable.new(nil, self) do
            column :ID, 'ID', :size => 10 do |tmpl|
                tmpl['ID']
            end

            column :USER, 'Username', :left, :size => 15 do |tmpl|
                tmpl['UNAME']
            end

            column :GROUP, 'Group', :left, :size => 15 do |tmpl|
                tmpl['GNAME']
            end

            column :NAME, 'Name', :left, :size => 37 do |tmpl|
                tmpl['NAME']
            end

            default :ID, :USER, :GROUP, :NAME
        end
    end

    # List the service template pool.
    #
    # @param client  [Service::Client] Petition client
    # @param options [Hash] CLI options (:json)
    #
    # @return [Integer, [Integer, String]] 0 on success, or
    #         [error_code, message] on request failure
    def list_service_template_pool(client, options)
        response = client.get(RESOURCE_PATH)

        return [response.code.to_i, response.to_s] \
            if CloudClient.is_error?(response)

        return [0, response.body] if options[:json]

        pool = JSON.parse(response.body)['DOCUMENT_POOL']
        format_service_template_pool.show(pool['DOCUMENT'])

        0
    end

    # List the service template pool continuously, refreshing
    # the screen every few seconds.
    #
    # @param client  [Service::Client] Petition client
    # @param options [Hash] CLI options (:delay)
    #
    # @return [Integer] 0 (exits the process on error)
    def top_service_template_pool(client, options)
        # TODO: make default delay configurable
        interval = options[:delay] || 4

        begin
            loop do
                CLIHelper.scr_cls
                CLIHelper.scr_move(0, 0)

                list_service_template_pool(client, options)

                sleep interval
            end
        rescue StandardError => e
            STDERR.puts e.message
            exit(-1)
        end

        0
    end

    # Show detailed information of a service template.
    #
    # @param client           [Service::Client] Petition client
    # @param service_template [Integer] Service template ID
    # @param options          [Hash] CLI options (:json)
    #
    # @return [Integer, [Integer, String]] 0 on success, or
    #         [error_code, message] on request failure
    def format_resource(client, service_template, options)
        response = client.get("#{RESOURCE_PATH}/#{service_template}")

        return [response.code.to_i, response.to_s] \
            if CloudClient.is_error?(response)

        return [0, response.body] if options[:json]

        row_fmt    = '%-20s: %-20s'
        header_fmt = '%-80s'

        document = JSON.parse(response.body)['DOCUMENT']
        template = document['TEMPLATE']['BODY']

        CLIHelper.print_header(
            header_fmt % "SERVICE TEMPLATE #{document['ID']} INFORMATION"
        )

        puts Kernel.format row_fmt, 'ID', document['ID']
        puts Kernel.format row_fmt, 'NAME', document['NAME']
        puts Kernel.format row_fmt, 'USER', document['UNAME']
        puts Kernel.format row_fmt, 'GROUP', document['GNAME']

        puts

        CLIHelper.print_header(header_fmt % 'PERMISSIONS', false)

        %w[OWNER GROUP OTHER].each do |owner_class|
            perms = document['PERMISSIONS']
            mask  = '---'

            mask[0] = 'u' if perms["#{owner_class}_U"] == '1'
            mask[1] = 'm' if perms["#{owner_class}_M"] == '1'
            mask[2] = 'a' if perms["#{owner_class}_A"] == '1'

            puts Kernel.format row_fmt, owner_class, mask
        end

        puts

        CLIHelper.print_header(header_fmt % 'TEMPLATE CONTENTS', false)
        puts JSON.pretty_generate(template)

        0
    end

end

View File

@ -33,17 +33,370 @@ end
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << RUBY_LIB_LOCATION + '/cli'
require 'json'
require 'command_parser'
require 'opennebula/oneflow_client'
require 'one_helper/oneflow_helper'
require 'cli_helper'
require 'one_helper/onevm_helper'
require 'json'
USER_AGENT = 'CLI'
# Base Path representing the resource to be used in the requests
RESOURCE_PATH = '/service'
#
# Table
#
SERVICE_TABLE = CLIHelper::ShowTable.new(nil, self) do
column :ID, 'ID', :size => 10 do |d|
d['ID']
end
column :USER, 'Username', :left, :size => 15 do |d|
d['UNAME']
end
column :GROUP, 'Group', :left, :size => 15 do |d|
d['GNAME']
end
column :NAME, 'Name', :size => 25, :left => true do |d|
d['NAME']
end
column :STATE, 'State', :size => 11, :left => true do |d|
Service.state_str(d['TEMPLATE']['BODY']['state'])
end
default :ID, :USER, :GROUP, :NAME, :STATE
end
NODE_TABLE = CLIHelper::ShowTable.new(nil, self) do
column :VM_ID, 'ONE identifier for Virtual Machine', :size => 6 do |d|
st = ''
if d['scale_up']
st << '\u2191 '
elsif d['disposed']
st << '\u2193 '
end
if d['vm_info'].nil?
st << d['deploy_id'].to_s
else
st << d['vm_info']['VM']['ID']
end
st
end
column :NAME, 'Name of the Virtual Machine', :left,
:size => 23 do |d|
if !d['vm_info'].nil?
if d['vm_info']['VM']['RESCHED'] == '1'
"*#{d['NAME']}"
else
d['vm_info']['VM']['NAME']
end
else
''
end
end
column :USER, 'Username of the Virtual Machine owner', :left,
:size => 8 do |d|
if !d['vm_info'].nil?
d['vm_info']['VM']['UNAME']
else
''
end
end
column :GROUP, 'Group of the Virtual Machine', :left,
:size => 8 do |d|
if !d['vm_info'].nil?
d['vm_info']['VM']['GNAME']
else
''
end
end
column :STAT, 'Actual status', :size => 4 do |d, _|
if !d['vm_info'].nil?
OneVMHelper.state_to_str(d['vm_info']['VM']['STATE'],
d['vm_info']['VM']['LCM_STATE'])
else
''
end
end
column :UCPU, 'CPU percentage used by the VM', :size => 4 do |d|
if !d['vm_info'].nil?
d['vm_info']['VM']['CPU']
else
''
end
end
column :UMEM, 'Memory used by the VM', :size => 7 do |d|
if !d['vm_info'].nil?
OpenNebulaHelper.unit_to_str(d['vm_info']['VM']['MEMORY'].to_i, {})
else
''
end
end
column :HOST, 'Host where the VM is running', :left, :size => 20 do |d|
if !d['vm_info'].nil?
if d['vm_info']['VM']['HISTORY_RECORDS'] &&
d['vm_info']['VM']['HISTORY_RECORDS']['HISTORY']
state_str =
VirtualMachine::VM_STATE[d['vm_info']['VM']['STATE'].to_i]
history = d['vm_info']['VM']['HISTORY_RECORDS']['HISTORY']
if %w[ACTIVE SUSPENDED].include? state_str
history = history.last if history.instance_of?(Array)
history['HOSTNAME']
end
end
else
''
end
end
column :TIME, 'Time since the VM was submitted', :size => 10 do |d|
if !d['vm_info'].nil?
stime = d['vm_info']['VM']['STIME'].to_i
if d['vm_info']['VM']['ETIME'] == '0'
etime = Time.now.to_i
else
etime = d['vm_info']['VM']['ETIME'].to_i
end
dtime = etime - stime
OpenNebulaHelper.period_to_str(dtime, false)
else
''
end
end
default :VM_ID, :NAME, :STAT, :UCPU, :UMEM, :HOST, :TIME
end
# List the services. This method is used in top and list commands
# @param [Service::Client] client
# @param [Hash] options
# @return [[Integer, String], Integer] Returns the exit_code and optionally
# a String to be printed
def list_services(client, options)
response = client.get(RESOURCE_PATH)
if CloudClient.is_error?(response)
[response.code.to_i, response.to_s]
else
# [0,response.body]
if options[:json]
[0, response.body]
else
array_list = JSON.parse(response.body)
SERVICE_TABLE.show(array_list['DOCUMENT_POOL']['DOCUMENT'])
0
end
end
end
# Show the service information. This method is used in top and show commands
# @param [Service::Client] client
# @param [Array] args
# @param [Hash] options
# @return [[Integer, String], Integer] Returns the exit_code and optionally
# a String to be printed
def show_service(client, args, options)
response = client.get("#{RESOURCE_PATH}/#{args[0]}")
if CloudClient.is_error?(response)
[response.code.to_i, response.to_s]
else
# [0,response.body]
if options[:json]
[0, response.body]
else
str = '%-20s: %-20s'
str_h1 = '%-80s'
document_hash = JSON.parse(response.body)
template = document_hash['DOCUMENT']['TEMPLATE']['BODY']
str_header = "SERVICE #{document_hash['DOCUMENT']['ID']} "\
'INFORMATION'
CLIHelper.print_header(str_h1 % str_header)
puts Kernel.format(str, 'ID', document_hash['DOCUMENT']['ID'])
puts Kernel.format(str, 'NAME', document_hash['DOCUMENT']['NAME'])
puts Kernel.format(str, 'USER', document_hash['DOCUMENT']['UNAME'])
puts Kernel.format(str, 'GROUP', document_hash['DOCUMENT']['GNAME'])
puts Kernel.format(str, 'STRATEGY', template['deployment'])
puts Kernel.format(str,
'SERVICE STATE',
Service.state_str(template['state']))
if template['shutdown_action']
puts Kernel.format(str, 'SHUTDOWN', template['shutdown_action'])
end
puts
CLIHelper.print_header(str_h1 % 'PERMISSIONS', false)
%w[OWNER GROUP OTHER].each do |e|
mask = '---'
permissions_hash = document_hash['DOCUMENT']['PERMISSIONS']
mask[0] = 'u' if permissions_hash["#{e}_U"] == '1'
mask[1] = 'm' if permissions_hash["#{e}_M"] == '1'
mask[2] = 'a' if permissions_hash["#{e}_A"] == '1'
puts Kernel.format(str, e, mask)
end
puts
template['roles'].each do |role|
CLIHelper.print_header("ROLE #{role['name']}", false)
puts Kernel.format(str,
'ROLE STATE',
Role.state_str(role['state']))
if role['parents']
puts Kernel.format(str,
'PARENTS',
role['parents'].join(', '))
end
puts Kernel.format(str, 'VM TEMPLATE', role['vm_template'])
puts Kernel.format(str, 'CARDINALITY', role['cardinality'])
if role['min_vms']
puts Kernel.format(str, 'MIN VMS', role['min_vms'])
end
if role['max_vms']
puts Kernel.format(str, 'MAX VMS', role['max_vms'])
end
if role['coolddown']
puts Kernel.format(str, 'COOLDOWN', "#{role['cooldown']}s")
end
if role['shutdown_action']
puts Kernel.format(str, 'SHUTDOWN', role['shutdown_action'])
end
puts 'NODES INFORMATION'
NODE_TABLE.show(role['nodes'])
if !role['elasticity_policies'].nil? &&
!role['elasticity_policies'].empty? ||
!role['scheduled_policies'].nil? &&
!role['scheduled_policies'].empty?
puts
puts 'ELASTICITY RULES'
if role['elasticity_policies'] &&
!role['elasticity_policies'].empty?
puts
# puts 'ELASTICITY POLICIES'
CLIHelper::ShowTable.new(nil, self) do
column :ADJUST, '', :left, :size => 12 do |d|
adjust_str(d)
end
column :EXPRESSION, '', :left, :size => 48 do |d|
if !d['expression_evaluated'].nil?
d['expression_evaluated']
else
d['expression']
end
end
column :EVALS, '', :right, :size => 5 do |d|
if d['period_number']
"#{d['true_evals'].to_i}/"\
"#{d['period_number']}"
else
'-'
end
end
column :PERIOD, '', :size => 6 do |d|
d['period'] ? "#{d['period']}s" : '-'
end
column :COOL, '', :size => 5 do |d|
d['cooldown'] ? "#{d['cooldown']}s" : '-'
end
default :ADJUST, :EXPRESSION, :EVALS, :PERIOD, :COOL
end.show([role['elasticity_policies']].flatten, {})
end
if role['scheduled_policies'] &&
!role['scheduled_policies'].empty?
puts
# puts 'SCHEDULED POLICIES'
CLIHelper::ShowTable.new(nil, self) do
column :ADJUST, '', :left, :size => 12 do |d|
adjust_str(d)
end
column :TIME, '', :left, :size => 67 do |d|
if d['start_time']
Time.parse(d['start_time']).to_s
else
d['recurrence']
end
end
default :ADJUST, :TIME
end.show([role['scheduled_policies']].flatten, {})
end
end
puts
end
puts
CLIHelper.print_header(str_h1 % 'LOG MESSAGES', false)
if template['log']
template['log'].each do |log|
t = Time.at(log['timestamp']).strftime('%m/%d/%y %H:%M')
puts "#{t} [#{log['severity']}] #{log['message']}"
end
end
0
end
end
end
def adjust_str(policy)
policy['adjust'].to_i >= 0 ? sign = '+' : sign = '-'
adjust = policy['adjust'].to_i.abs
case policy['type']
when 'CARDINALITY'
"= #{adjust}"
when 'PERCENTAGE_CHANGE'
st = "#{sign} #{adjust} %"
if policy['min_adjust_step']
st << " (#{policy['min_adjust_step']})"
end
st
else
"#{sign} #{adjust}"
end
end
#
# Commands
#
CommandParser::CmdParser.new(ARGV) do
usage '`oneflow` <command> [<args>] [<options>]'
version OpenNebulaHelper::ONE_VERSION
@ -52,19 +405,9 @@ CommandParser::CmdParser.new(ARGV) do
set :option, CommandParser::VERSION
set :option, CommandParser::HELP
DONE = {
:name => 'done',
:large => '--done',
:description => 'Show services in DONE state'
}
# create helper object
helper = OneFlowHelper.new
############################################################################
#
# Formatters for arguments
############################################################################
#
set :format, :groupid, OpenNebulaHelper.rname_to_id_desc('GROUP') do |arg|
OpenNebulaHelper.rname_to_id(arg, 'GROUP')
end
@ -81,213 +424,326 @@ CommandParser::CmdParser.new(ARGV) do
Service.list_to_id(arg, 'SERVICE')
end
set :format,
:vm_action,
set :format, :vm_action,
'Actions supported: #{Role::SCHEDULE_ACTIONS.join(', ')}' do |arg|
if Role::SCHEDULE_ACTIONS.include?(arg)
[0, arg]
else
[-1, "Action '#{arg}' is not supported. Supported actions: " \
[-1, "Action #{arg} is not supported. Actions supported: "\
"#{Role::SCHEDULE_ACTIONS.join(', ')}"]
end
end
###
#
# List
#
list_desc = <<-EOT.unindent
List the available services
EOT
command :list, list_desc, :options => [Service::JSON_FORMAT, DONE] do
helper.list_service_pool(helper.client(options), options)
command :list, list_desc, :options => Service::JSON_FORMAT do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
list_services(client, options)
end
###
top_desc = <<-EOT.unindent
Top the available services
EOT
command :top, top_desc, :options => [CLIHelper::DELAY, DONE] do
Signal.trap('INT') { exit(-1) }
helper.top_service_pool(helper.client(options), options)
0
end
###
#
# Show
#
show_desc = <<-EOT.unindent
Show detailed information of a given service
EOT
command :show, show_desc, :service_id, :options => Service::JSON_FORMAT do
helper.format_resource(helper.client(options), args[0], options)
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
show_service(client, args, options)
end
###
#
# Top
#
top_desc = <<-EOT.unindent
Top the services or the extended information of the target service if a
id is specified
EOT
command :top, top_desc, [:service_id, nil],
:options => [Service::JSON_FORMAT,
Service::TOP,
CLIHelper::DELAY] do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
options[:delay] ? delay = options[:delay] : delay = 3
begin
loop do
CLIHelper.scr_cls
CLIHelper.scr_move(0, 0)
if args[0]
rc, message = show_service(client, args, options)
else
rc, message = list_services(client, options)
end
raise message if rc
sleep delay
end
rescue StandardError => e
puts e.message
-1
end
end
#
# Delete
#
delete_desc = <<-EOT.unindent
Delete a given service
EOT
command :delete, delete_desc, [:range, :service_id_list] do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions(args[0]) do |service_id|
helper.client(options).delete("#{RESOURCE_PATH}/#{service_id}")
client.delete("#{RESOURCE_PATH}/#{service_id}")
end
end
###
#
# Shutdown
#
shutdown_desc = <<-EOT.unindent
Shutdown a service.
From RUNNING or WARNING shuts down the Service
EOT
command :shutdown, shutdown_desc, [:range, :service_id_list] do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions(args[0]) do |service_id|
json_action = Service.build_json_action('shutdown')
client.post("#{RESOURCE_PATH}/#{service_id}/action", json_action)
end
end
#
# Recover
#
recover_desc = <<-EOT.unindent
Recover a failed service, cleaning the failed VMs.
From FAILED_DEPLOYING continues deploying the Service
From FAILED_SCALING continues scaling the Service
From FAILED_UNDEPLOYING continues shutting down the Service
From COOLDOWN the Service is set to running ignoring the cooldown
From COOLDOWN the Service is set to running ignoring the cooldown duration
From WARNING failed VMs are deleted, and new VMs are instantiated
EOT
command :recover, recover_desc, [:range, :service_id_list] do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions(args[0]) do |service_id|
helper.client(options).post("#{RESOURCE_PATH}/#{service_id}/action",
Service.build_json_action('recover'))
json_action = Service.build_json_action('recover')
client.post("#{RESOURCE_PATH}/#{service_id}/action", json_action)
end
end
###
#
# Scale
#
scale_desc = <<-EOT.unindent
Scale a role to the given cardinality
EOT
command :scale,
scale_desc,
:service_id,
:role_name,
:cardinality,
:options => [Service::FORCE] do
command :scale, scale_desc, :service_id, :role_name,
:cardinality, :options => [Service::FORCE] do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
if args[2] !~ /^\d+$/
STDERR.puts 'Cardinality must be an integer number'
puts 'Cardinality must be an integer number'
exit(-1)
end
json = "{ \"cardinality\" : #{args[2]},\n" \
" \"force\" : #{options[:force] == true}, " \
" \"role_name\" : \"#{args[1]}\"}"
exit_code = 0
Service.perform_action(args[0]) do |service_id|
helper.client(options).post("#{RESOURCE_PATH}/#{service_id}/scale",
json)
json = "{ \"cardinality\" : #{args[2]},\n" \
" \"force\" : #{options[:force] == true} }"
response = client
.put("#{RESOURCE_PATH}/#{args[0]}/role/#{args[1]}", json)
if CloudClient.is_error?(response)
puts response.to_s
exit_code = response.code.to_i
end
end
###
exit_code
end
chgrp_desc = <<-EOT.unindent
Changes the service group
EOT
command :chgrp, chgrp_desc, [:range, :service_id_list], :groupid do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions(args[0]) do |service_id|
params = {}
params['group_id'] = args[1].to_i
json = Service.build_json_action('chgrp', params)
json_action = Service.build_json_action('chgrp', params)
helper.client(options).post("#{RESOURCE_PATH}/#{service_id}/action",
json)
client.post("#{RESOURCE_PATH}/#{service_id}/action", json_action)
end
end
###
chown_desc = <<-EOT.unindent
Changes the service owner and group
EOT
command :chown,
chown_desc,
[:range, :service_id_list],
:userid,
[:groupid, nil] do
command :chown, chown_desc,
[:range, :service_id_list], :userid, [:groupid, nil] do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions(args[0]) do |service_id|
params = {}
params['owner_id'] = args[1]
params['group_id'] = args[2] if args[2]
json = Service.build_json_action('chown', params)
json_action = Service.build_json_action('chown', params)
helper.client(options).post("#{RESOURCE_PATH}/#{service_id}/action",
json)
client.post("#{RESOURCE_PATH}/#{service_id}/action", json_action)
end
end
###
chmod_desc = <<-EOT.unindent
Changes the service permissions
EOT
command :chmod, chmod_desc, [:range, :service_id_list], :octet do
if !/\A\d+\z/.match(args[1])
STDERR.puts "Invalid '#{args[1]}' octed permissions"
exit(-1)
end
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions(args[0]) do |service_id|
params = {}
params['octet'] = args[1]
json = Service.build_json_action('chmod', params)
json_action = Service.build_json_action('chmod', params)
helper.client(options).post("#{RESOURCE_PATH}/#{service_id}/action",
json)
client.post("#{RESOURCE_PATH}/#{service_id}/action", json_action)
end
end
###
rename_desc = <<-EOT.unindent
Renames the Service
EOT
command :rename, rename_desc, :service_id, :name do
Service.perform_action(args[0]) do |service_id|
params = {}
params['name'] = args[1]
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
json = Service.build_json_action('rename', params)
params = {}
params['name'] = args[1]
helper.client(options).post("#{RESOURCE_PATH}/#{service_id}/action",
json)
json_action = Service.build_json_action('rename', params)
response = client
.post("#{RESOURCE_PATH}/#{args[0]}/action", json_action)
if CloudClient.is_error?(response)
[response.code.to_i, response.to_s]
else
response.code.to_i
end
end
###
action_desc = <<-EOT.unindent
Perform an action on all the Virtual Machines of a given role.
Actions supported: #{Role::SCHEDULE_ACTIONS.join(',')}
EOT
command :action,
action_desc,
:service_id,
:role_name,
:vm_action,
command :action, action_desc, :service_id, :role_name, :vm_action,
:options => [Service::PERIOD, Service::NUMBER] do
Service.perform_action(args[0]) do |service_id|
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
Service.perform_actions([args[0]]) do |service_id|
params = {}
params[:period] = options[:period].to_i if options[:period]
params[:number] = options[:number].to_i if options[:number]
json = Service.build_json_action(args[2], params)
client = helper.client(options)
json_action = Service.build_json_action(args[2], params)
client.post("#{RESOURCE_PATH}/#{service_id}/role/#{args[1]}/action",
json)
json_action)
end
end
end

View File

@ -33,19 +33,125 @@ end
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << RUBY_LIB_LOCATION + '/cli'
require 'json'
require 'English'
require 'command_parser'
require 'opennebula/oneflow_client'
require 'English'
require 'cli_helper'
require 'one_helper/oneflowtemplate_helper'
require 'one_helper'
require 'json'
USER_AGENT = 'CLI'
# Base Path representing the resource to be used in the requests
RESOURCE_PATH = '/service_template'
#
# Table
#
TABLE = CLIHelper::ShowTable.new(nil, self) do
column :ID, 'ID', :size => 10 do |d|
d['ID']
end
column :USER, 'Username', :left, :size => 15 do |d|
d['UNAME']
end
column :GROUP, 'Group', :left, :size => 15 do |d|
d['GNAME']
end
column :NAME, 'Name', :left, :size => 37 do |d|
d['NAME']
end
default :ID, :USER, :GROUP, :NAME
end
# Show the service template information. This method is used in top and
# show commands
# @param [Service::Client] client
# @param [Array] args
# @param [Hash] options
# @return [[Integer, String], Integer] Returns the exit_code and optionally
# a String to be printed
def show_service_template(client, args, options)
response = client.get("#{RESOURCE_PATH}/#{args[0]}")
if CloudClient.is_error?(response)
[response.code.to_i, response.to_s]
else
# [0,response.body]
if options[:json]
[0, response.body]
else
str = '%-20s: %-20s'
str_h1 = '%-80s'
document_hash = JSON.parse(response.body)
template = document_hash['DOCUMENT']['TEMPLATE']['BODY']
CLIHelper.print_header(str_h1 %
"SERVICE TEMPLATE #{document_hash['DOCUMENT']['ID']} "\
'INFORMATION')
puts Kernel.format str, 'ID', document_hash['DOCUMENT']['ID']
puts Kernel.format str, 'NAME', document_hash['DOCUMENT']['NAME']
puts Kernel.format str, 'USER', document_hash['DOCUMENT']['UNAME']
puts Kernel.format str, 'GROUP', document_hash['DOCUMENT']['GNAME']
puts
CLIHelper.print_header(str_h1 % 'PERMISSIONS', false)
%w[OWNER GROUP OTHER].each do |e|
mask = '---'
permissions_hash = document_hash['DOCUMENT']['PERMISSIONS']
mask[0] = 'u' if permissions_hash["#{e}_U"] == '1'
mask[1] = 'm' if permissions_hash["#{e}_M"] == '1'
mask[2] = 'a' if permissions_hash["#{e}_A"] == '1'
puts Kernel.format str, e, mask
end
puts
CLIHelper.print_header(str_h1 % 'TEMPLATE CONTENTS', false)
puts JSON.pretty_generate(template)
0
end
end
end
# List the services. This method is used in top and list commands
# @param [Service::Client] client
# @param [Hash] options
# @return [[Integer, String], Integer] Returns the exit_code and optionally
# a String to be printed
def list_service_templates(client, options)
response = client.get(RESOURCE_PATH)
if CloudClient.is_error?(response)
[response.code.to_i, response.to_s]
else
# [0,response.body]
if options[:json]
[0, response.body]
else
array_list = JSON.parse(response.body)
TABLE.show(array_list['DOCUMENT_POOL']['DOCUMENT'])
0
end
end
end
#
# Commands
#
CommandParser::CmdParser.new(ARGV) do
usage '`oneflow-template` <command> [<args>] [<options>]'
version OpenNebulaHelper::ONE_VERSION
@ -54,12 +160,9 @@ CommandParser::CmdParser.new(ARGV) do
set :option, CommandParser::VERSION
set :option, CommandParser::HELP
# create helper object
helper = OneFlowTemplateHelper.new
############################################################################
#
# Formatters for arguments
############################################################################
#
set :format, :groupid, OpenNebulaHelper.rname_to_id_desc('GROUP') do |arg|
OpenNebulaHelper.rname_to_id(arg, 'GROUP')
end
@ -78,7 +181,9 @@ CommandParser::CmdParser.new(ARGV) do
Service.list_to_id(arg, 'SERVICE TEMPLATE')
end
###
#
# List
#
list_desc = <<-EOT.unindent
List the available Service Templates
@ -92,10 +197,12 @@ CommandParser::CmdParser.new(ARGV) do
:user_agent => USER_AGENT
)
helper.list_service_template_pool(client, options)
list_service_templates(client, options)
end
###
#
# Top
#
top_desc = <<-EOT.unindent
List the available Service Templates continuously
@ -112,29 +219,30 @@ CommandParser::CmdParser.new(ARGV) do
:user_agent => USER_AGENT
)
Signal.trap('INT') { exit(-1) }
options[:delay] ? delay = options[:delay] : delay = 3
helper.top_service_template_pool(client, options)
begin
loop do
CLIHelper.scr_cls
CLIHelper.scr_move(0, 0)
rc, message = list_service_templates(client, options)
if rc != 0
raise message
end
sleep delay
end
rescue StandardError => e
puts e.message
-1
end
end
###
show_desc = <<-EOT.unindent
Show detailed information of a given Service Template
EOT
command :show, show_desc, :templateid, :options => Service::JSON_FORMAT do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
helper.format_resource(client, args[0], options)
end
###
#
# Create
#
create_desc = <<-EOT.unindent
Create a new Service Template
@ -163,7 +271,28 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
#
# Show
#
show_desc = <<-EOT.unindent
Show detailed information of a given Service Template
EOT
command :show, show_desc, :templateid, :options => Service::JSON_FORMAT do
client = Service::Client.new(
:username => options[:username],
:password => options[:password],
:url => options[:server],
:user_agent => USER_AGENT
)
show_service_template(client, args, options)
end
#
# Delete
#
delete_desc = <<-EOT.unindent
Delete a given Service Template
@ -182,7 +311,9 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
#
# Instantiate
#
instantiate_desc = <<-EOT.unindent
Instantiate a Service Template
@ -220,8 +351,6 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
chgrp_desc = <<-EOT.unindent
Changes the service template group
EOT
@ -244,8 +373,6 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
chown_desc = <<-EOT.unindent
Changes the service template owner and group
EOT
@ -270,8 +397,6 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
chmod_desc = <<-EOT.unindent
Changes the service template permissions
EOT
@ -294,8 +419,6 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
clone_desc = <<-EOT.unindent
Creates a new Service Template from an existing one
EOT
@ -329,8 +452,6 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
rename_desc = <<-EOT.unindent
Renames the Service Template
EOT
@ -358,8 +479,6 @@ CommandParser::CmdParser.new(ARGV) do
end
end
###
update_desc = <<-EOT.unindent
Update the template contents. If a path is not provided the editor will
be launched to modify the current content.

View File

@ -49,15 +49,15 @@
:action_number: 1
:action_period: 60
# Default name for the Virtual Machines and Virtual Networks created by oneflow. You can use any
# Default name for the Virtual Machines created by oneflow. You can use any
# of the following placeholders:
# $SERVICE_ID
# $SERVICE_NAME
# $ROLE_NAME
# $VM_NUMBER (only for VM names)
# $VM_NUMBER
:vm_name_template: '$ROLE_NAME_$VM_NUMBER_(service_$SERVICE_ID)'
#:vn_name_template: '$ROLE_NAME(service_$SERVICE_ID)'
#############################################################
# Auth
#############################################################

View File

@ -1,322 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'ActionManager'
require 'ffi-rzmq'
# OneFlow Event Manager
class EventManager

    # ServiceLCM ActionManager used as target for the *_cb callbacks
    attr_writer :lcm

    # Internal ActionManager that serializes the wait actions
    attr_reader :am

    LOG_COMP = 'EM'

    # Actions registered on the internal ActionManager
    ACTIONS = {
        'WAIT_DEPLOY' => :wait_deploy,
        'WAIT_UNDEPLOY' => :wait_undeploy,
        'WAIT_SCALEUP' => :wait_scaleup,
        'WAIT_SCALEDOWN' => :wait_scaledown,
        'WAIT_COOLDOWN' => :wait_cooldown
    }

    # VM LCM states treated as a failure while waiting for a target state
    FAILURE_STATES = %w[
        BOOT_FAILURE
        BOOT_MIGRATE_FAILURE
        PROLOG_MIGRATE_FAILURE
        PROLOG_FAILURE
        EPILOG_FAILURE
        EPILOG_STOP_FAILURE
        EPILOG_UNDEPLOY_FAILURE
        PROLOG_MIGRATE_POWEROFF_FAILURE
        PROLOG_MIGRATE_SUSPEND_FAILURE
        PROLOG_MIGRATE_UNKNOWN_FAILURE
        BOOT_UNDEPLOY_FAILURE
        BOOT_STOPPED_FAILURE
        PROLOG_RESUME_FAILURE
        PROLOG_UNDEPLOY_FAILURE
    ]

    # --------------------------------------------------------------------------
    # Default configuration options for the module
    # --------------------------------------------------------------------------
    DEFAULT_CONF = {
        :subscriber_endpoint => 'tcp://localhost:2101',
        :timeout_s => 30,
        :concurrency => 10,
        :cloud_auth => nil,
        :am => nil
    }

    # @param [Hash] options merged over DEFAULT_CONF. Relevant keys:
    #   :subscriber_endpoint ZMQ endpoint to subscribe for VM state events
    #   :timeout_s           receive timeout before polling VM state via OCA
    #   :concurrency         max concurrent actions of the ActionManager
    #   :cloud_auth          auth helper providing an OpenNebula client
    #   :lcm                 ActionManager of the ServiceLCM (callback target)
    def initialize(options)
        @conf = DEFAULT_CONF.merge(options)

        @lcm = options[:lcm]
        @am = ActionManager.new(@conf[:concurrency], true)
        @context = ZMQ::Context.new(1)
        @cloud_auth = @conf[:cloud_auth]

        # Register Action Manager actions
        @am.register_action(ACTIONS['WAIT_DEPLOY'], method('wait_deploy_action'))
        @am.register_action(ACTIONS['WAIT_UNDEPLOY'], method('wait_undeploy_action'))
        @am.register_action(ACTIONS['WAIT_COOLDOWN'], method('wait_cooldown'))
        @am.register_action(ACTIONS['WAIT_SCALEUP'], method('wait_scaleup_action'))
        @am.register_action(ACTIONS['WAIT_SCALEDOWN'], method('wait_scaledown_action'))

        # Listener runs in its own thread so trigger_action callers don't block
        Thread.new { @am.start_listener }
    end

    ############################################################################
    # Actions
    ############################################################################

    # Wait for nodes to be in (ACTIVE, RUNNING); triggers :deploy_cb on
    # success or :deploy_failure_cb on failure in the LCM ActionManager
    # @param [OpenNebula::Client] client client of the service owner
    # @param [Integer] service_id the service ID
    # @param [String] role_name the role which contains the VMs
    # @param [Array<Integer>] nodes the list of nodes (VMs) to wait for
    def wait_deploy_action(client, service_id, role_name, nodes)
        Log.info LOG_COMP, "Waiting #{nodes} to be (ACTIVE, RUNNING)"
        rc = wait(nodes, 'ACTIVE', 'RUNNING')

        # Todo, check if OneGate confirmation is needed (trigger another action)
        if rc[0]
            @lcm.trigger_action(:deploy_cb,
                                service_id,
                                client,
                                service_id,
                                role_name)
        else
            @lcm.trigger_action(:deploy_failure_cb,
                                service_id,
                                client,
                                service_id,
                                role_name)
        end
    end

    # Wait for nodes to be in (DONE, LCM_INIT); triggers :undeploy_cb or
    # :undeploy_failure_cb with the per-node success/failure result
    # @param [OpenNebula::Client] client client of the service owner
    # @param [Integer] service_id the service ID
    # @param [String] role_name the role which contains the VMs
    # @param [Array<Integer>] nodes the list of nodes (VMs) to wait for
    def wait_undeploy_action(client, service_id, role_name, nodes)
        Log.info LOG_COMP, "Waiting #{nodes} to be (DONE, LCM_INIT)"
        rc = wait(nodes, 'DONE', 'LCM_INIT')

        if rc[0]
            @lcm.trigger_action(:undeploy_cb,
                                service_id,
                                client,
                                service_id,
                                role_name,
                                rc[1])
        else
            @lcm.trigger_action(:undeploy_failure_cb,
                                service_id,
                                client,
                                service_id,
                                role_name,
                                rc[1])
        end
    end

    # Wait for scaled-up nodes to be in (ACTIVE, RUNNING); triggers
    # :scaleup_cb or :scaleup_failure_cb
    # @param [OpenNebula::Client] client client of the service owner
    # @param [Integer] service_id the service ID
    # @param [String] role_name the role which contains the VMs
    # @param [Array<Integer>] nodes the list of nodes (VMs) to wait for
    def wait_scaleup_action(client, service_id, role_name, nodes)
        Log.info LOG_COMP, "Waiting #{nodes} to be (ACTIVE, RUNNING)"
        rc = wait(nodes, 'ACTIVE', 'RUNNING')

        # Todo, check if OneGate confirmation is needed (trigger another action)
        if rc[0]
            @lcm.trigger_action(:scaleup_cb,
                                service_id,
                                client,
                                service_id,
                                role_name)
        else
            @lcm.trigger_action(:scaleup_failure_cb,
                                service_id,
                                client,
                                service_id,
                                role_name)
        end
    end

    # Wait for scaled-down nodes to be in (DONE, LCM_INIT); triggers
    # :scaledown_cb or :scaledown_failure_cb with the per-node result
    # @param [OpenNebula::Client] client client of the service owner
    # @param [Integer] service_id the service ID
    # @param [String] role_name the role which contains the VMs
    # @param [Array<Integer>] nodes the list of nodes (VMs) to wait for
    def wait_scaledown_action(client, service_id, role_name, nodes)
        Log.info LOG_COMP, "Waiting #{nodes} to be (DONE, LCM_INIT)"
        rc = wait(nodes, 'DONE', 'LCM_INIT')

        # Todo, check if OneGate confirmation is needed (trigger another action)
        if rc[0]
            @lcm.trigger_action(:scaledown_cb,
                                service_id,
                                client,
                                service_id,
                                role_name,
                                rc[1])
        else
            @lcm.trigger_action(:scaledown_failure_cb,
                                service_id,
                                client,
                                service_id,
                                role_name,
                                rc[1])
        end
    end

    # Sleep for the cooldown period, then trigger :cooldown_cb
    # @param [OpenNebula::Client] client client of the service owner
    # @param [Integer] service_id the service ID
    # @param [String] role_name the role name of the role being scaled
    # @param [Integer] cooldown_time seconds to sleep before the callback
    def wait_cooldown(client, service_id, role_name, cooldown_time)
        Log.info LOG_COMP, "Waiting #{cooldown_time}s for cooldown for " \
            "service #{service_id} and role #{role_name}."

        sleep cooldown_time

        @lcm.trigger_action(:cooldown_cb,
                            service_id,
                            client,
                            service_id,
                            role_name)
    end

    private

    ############################################################################
    # Helpers
    ############################################################################

    # Extract the VM ID from an event key of the form ".../<vm_id>"
    def retrieve_id(key)
        key.split('/')[-1].to_i
    end

    # Block until every node reaches (state, lcm_state), a node fails, or
    # an unrecoverable read error happens. On receive timeout the nodes are
    # polled directly through OCA (see check_nodes).
    # @return [[Boolean, Hash]] overall success flag and the
    #   {:successful => [...], :failure => [...]} node classification
    # NOTE(review): the subscriber socket created here is never closed;
    # confirm whether ffi-rzmq reclaims it with the context.
    def wait(nodes, state, lcm_state)
        subscriber = gen_subscriber

        rc_nodes = { :successful => [], :failure => [] }

        return [true, rc_nodes] if nodes.empty?

        nodes.each do |node|
            subscribe(node, state, lcm_state, subscriber)
        end

        key = ''
        content = ''

        until nodes.empty?
            rc = subscriber.recv_string(key)
            rc = subscriber.recv_string(content) if rc == 0

            # NOTE(review): on a non-EAGAIN read error only a log is written
            # and execution falls through to retrieve_id with a stale key;
            # verify this is intended.
            if rc == -1 && ZMQ::Util.errno != ZMQ::EAGAIN
                Log.error LOG_COMP, 'Error reading from subscriber.'
            elsif rc == -1
                # Receive timeout: fall back to polling the VMs via OCA
                Log.info LOG_COMP, "Timeout reached for VM #{nodes} =>"\
                    " (#{state}, #{lcm_state})"

                rc = check_nodes(nodes, state, lcm_state, subscriber)

                rc_nodes[:successful].concat(rc[:successful])
                rc_nodes[:failure].concat(rc[:failure])

                # Keep waiting while nodes remain and none has failed
                next if !nodes.empty? && rc_nodes[:failure].empty?

                # If any node is in error the wait action will fail
                return [false, rc_nodes] unless rc_nodes[:failure].empty?

                return [true, rc_nodes] # (nodes.empty? && fail_nodes.empty?)
            end

            id = retrieve_id(key)
            Log.info LOG_COMP, "Node #{id} reached (#{state},#{lcm_state})"

            nodes.delete(id)
            unsubscribe(id, state, lcm_state, subscriber)

            rc_nodes[:successful] << id
        end

        [true, rc_nodes]
    end

    # Poll the remaining nodes through OCA and classify them: nodes already
    # in DONE or in the target (state, lcm_state) are successful; nodes in a
    # FAILURE_STATES LCM state are failures. Classified nodes are removed
    # from the nodes array and unsubscribed.
    # @return [Hash] {:successful => [...], :failure => [...]}
    def check_nodes(nodes, state, lcm_state, subscriber)
        rc_nodes = { :successful => [], :failure => [] }

        nodes.delete_if do |node|
            vm = OpenNebula::VirtualMachine
                 .new_with_id(node, @cloud_auth.client)

            vm.info

            vm_state = OpenNebula::VirtualMachine::VM_STATE[vm.state]
            vm_lcm_state = OpenNebula::VirtualMachine::LCM_STATE[vm.lcm_state]

            if vm_state == 'DONE' ||
               (vm_state == state && vm_lcm_state == lcm_state)
                unsubscribe(node, state, lcm_state, subscriber)

                rc_nodes[:successful] << node
                next true
            end

            if FAILURE_STATES.include? vm_lcm_state
                Log.error LOG_COMP, "Node #{node} is in FAILURE state"

                rc_nodes[:failure] << node

                next true
            end

            false
        end

        rc_nodes
    end

    ############################################################################
    # Functions to subscribe/unsubscribe to event changes on VM
    ############################################################################

    # Build a SUB socket connected to the event endpoint
    def gen_subscriber
        subscriber = @context.socket(ZMQ::SUB)
        # Set timeout (TODO add option for customize timeout)
        # RCVTIMEO is in milliseconds; timeout_s is converted from seconds
        subscriber.setsockopt(ZMQ::RCVTIMEO, @conf[:timeout_s] * 10**3)
        subscriber.connect(@conf[:subscriber_endpoint])

        subscriber
    end

    def subscribe(vm_id, state, lcm_state, subscriber)
        subscriber.setsockopt(ZMQ::SUBSCRIBE,
                              gen_filter(vm_id, state, lcm_state))
    end

    def unsubscribe(vm_id, state, lcm_state, subscriber)
        subscriber.setsockopt(ZMQ::UNSUBSCRIBE,
                              gen_filter(vm_id, state, lcm_state))
    end

    # Topic filter matching the oned event stream for one VM/state pair
    def gen_filter(vm_id, state, lcm_state)
        "EVENT STATE VM/#{state}/#{lcm_state}/#{vm_id}"
    end

end

View File

@ -15,686 +15,163 @@
#--------------------------------------------------------------------------- #
require 'strategy'
require 'ActionManager'
# Service Life Cycle Manager
class ServiceLCM
attr_writer :event_manager
attr_reader :am
LOG_COMP = "LCM"
LOG_COMP = 'LCM'
ACTIONS = {
# Callbacks
'DEPLOY_CB' => :deploy_cb,
'DEPLOY_FAILURE_CB' => :deploy_failure_cb,
'UNDEPLOY_CB' => :undeploy_cb,
'UNDEPLOY_FAILURE_CB' => :undeploy_failure_cb,
'COOLDOWN_CB' => :cooldown_cb,
'SCALEUP_CB' => :scaleup_cb,
'SCALEUP_FAILURE_CB' => :scaleup_failure_cb,
'SCALEDOWN_CB' => :scaledown_cb,
'SCALEDOWN_FAILURE_CB' => :scaledown_failure_cb
}
def initialize(client, concurrency, cloud_auth)
def initialize(sleep_time, cloud_auth)
@sleep_time = sleep_time
@cloud_auth = cloud_auth
@am = ActionManager.new(concurrency, true)
@srv_pool = ServicePool.new(@cloud_auth, nil)
em_conf = {
:cloud_auth => @cloud_auth,
:concurrency => 10,
:lcm => @am
}
@event_manager = EventManager.new(em_conf).am
# Register Action Manager actions
@am.register_action(ACTIONS['DEPLOY_CB'],
method('deploy_cb'))
@am.register_action(ACTIONS['DEPLOY_FAILURE_CB'],
method('deploy_failure_cb'))
@am.register_action(ACTIONS['UNDEPLOY_CB'],
method('undeploy_cb'))
@am.register_action(ACTIONS['UNDEPLOY_FAILURE_CB'],
method('undeploy_failure_cb'))
@am.register_action(ACTIONS['SCALEUP_CB'],
method('scaleup_cb'))
@am.register_action(ACTIONS['SCALEUP_FAILURE_CB'],
method('scaleup_failure_cb'))
@am.register_action(ACTIONS['SCALEDOWN_CB'],
method('scaledown_cb'))
@am.register_action(ACTIONS['SCALEDOWN_FAILURE_CB'],
method('scaledown_failure_cb'))
@am.register_action(ACTIONS['COOLDOWN_CB'],
method('cooldown_cb'))
Thread.new { @am.start_listener }
Thread.new { catch_up(client) }
end
# Change service ownership
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
# @param u_id [Integer] User ID
# @param g_id [Integer] Group ID
#
# @return [OpenNebula::Error] Error if any
def chown_action(client, service_id, u_id, g_id)
rc = @srv_pool.get(service_id, client) do |service|
service.chown(u_id, g_id)
end
def loop()
Log.info LOG_COMP, "Starting Life Cycle Manager"
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
while true
srv_pool = ServicePool.new(@cloud_auth.client)
rc
end
rc = srv_pool.info_all()
# Change service permissions
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
# @param octet [Integer] Permissions in octet format
#
# @return [OpenNebula::Error] Error if any
def chmod_action(client, service_id, octet)
rc = @srv_pool.get(service_id, client) do |service|
service.chmod_octet(octet)
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
# Change service name
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
# @param new_name [String] New service name
#
# @return [OpenNebula::Error] Error if any
def rename_action(client, service_id, new_name)
rc = @srv_pool.get(service_id, client) do |service|
service.rename(new_name)
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
# Add shced action to service role
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
# @param role_name [String] Role to add action
# @param action [String] Action to perform
# @param period [Integer] When to execute the action
# @param number [Integer] How many VMs per period
#
# @return [OpenNebula::Error] Error if any
def sched_action(client, service_id, role_name, action, period, number)
rc = @srv_pool.get(service_id, client) do |service|
role = service.roles[role_name]
if role.nil?
break OpenNebula::Error.new("Role '#{role_name}' not found")
end
role.batch_action(action, period, number)
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
# Create new service
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
#
# @return [OpenNebula::Error] Error if any
def deploy_action(client, service_id)
rc = @srv_pool.get(service_id, client) do |service|
# Create vnets only first time action is called
if service.state == Service::STATE['PENDING']
rc = service.deploy_networks
if OpenNebula.is_error?(rc)
service.set_state(Service::STATE['FAILED_DEPLOYING'])
service.update
break rc
end
end
set_deploy_strategy(service)
roles = service.roles_deploy
# Maybe roles.empty? because are being deploying in other threads
if roles.empty?
if service.all_roles_running?
service.set_state(Service::STATE['RUNNING'])
service.update
end
# If there is no node in PENDING the service is not modified.
break
end
rc = deploy_roles(client,
roles,
'DEPLOYING',
'FAILED_DEPLOYING',
false)
if !OpenNebula.is_error?(rc)
service.set_state(Service::STATE['DEPLOYING'])
if OpenNebula.is_error?(rc)
Log.error LOG_COMP, "Error retrieving the Service Pool: #{rc.message}"
else
service.set_state(Service::STATE['FAILED_DEPLOYING'])
srv_pool.each_xpath('DOCUMENT/ID') { |id|
rc_get = srv_pool.get(id.to_i) { |service|
owner_client = @cloud_auth.client(service.owner_name)
service.replace_client(owner_client)
Log.debug LOG_COMP, "Loop for service #{service.id()} #{service.name()}" \
" #{service.state_str()} #{service.strategy()}"
strategy = get_deploy_strategy(service)
case service.state()
when Service::STATE['PENDING']
service.set_state(Service::STATE['DEPLOYING'])
rc = strategy.boot_step(service)
if !rc[0]
service.set_state(Service::STATE['FAILED_DEPLOYING'])
end
when Service::STATE['DEPLOYING']
strategy.monitor_step(service)
if service.all_roles_running?
service.set_state(Service::STATE['RUNNING'])
elsif service.any_role_failed?
service.set_state(Service::STATE['FAILED_DEPLOYING'])
else
rc = strategy.boot_step(service)
if !rc[0]
service.set_state(Service::STATE['FAILED_DEPLOYING'])
end
end
when Service::STATE['RUNNING'], Service::STATE['WARNING']
strategy.monitor_step(service)
if service.all_roles_running?
if service.state() == Service::STATE['WARNING']
service.set_state(Service::STATE['RUNNING'])
end
else
if service.state() == Service::STATE['RUNNING']
service.set_state(Service::STATE['WARNING'])
end
end
if strategy.apply_scaling_policies(service)
service.set_state(Service::STATE['SCALING'])
rc = strategy.scale_step(service)
if !rc[0]
service.set_state(Service::STATE['FAILED_SCALING'])
end
end
when Service::STATE['SCALING']
strategy.monitor_step(service)
if service.any_role_failed_scaling?
service.set_state(Service::STATE['FAILED_SCALING'])
elsif service.any_role_cooldown?
service.set_state(Service::STATE['COOLDOWN'])
elsif !service.any_role_scaling?
service.set_state(Service::STATE['RUNNING'])
else
rc = strategy.scale_step(service)
if !rc[0]
service.set_state(Service::STATE['FAILED_SCALING'])
end
end
when Service::STATE['COOLDOWN']
strategy.monitor_step(service)
if !service.any_role_cooldown?
service.set_state(Service::STATE['RUNNING'])
end
when Service::STATE['FAILED_SCALING']
strategy.monitor_step(service)
if !service.any_role_failed_scaling?
service.set_state(Service::STATE['SCALING'])
end
when Service::STATE['UNDEPLOYING']
strategy.monitor_step(service)
if service.all_roles_done?
service.set_state(Service::STATE['DONE'])
elsif service.any_role_failed?
service.set_state(Service::STATE['FAILED_UNDEPLOYING'])
else
rc = strategy.shutdown_step(service)
if !rc[0]
service.set_state(Service::STATE['FAILED_UNDEPLOYING'])
end
end
when Service::STATE['FAILED_DEPLOYING']
strategy.monitor_step(service)
if !service.any_role_failed?
service.set_state(Service::STATE['DEPLOYING'])
end
when Service::STATE['FAILED_UNDEPLOYING']
strategy.monitor_step(service)
if !service.any_role_failed?
service.set_state(Service::STATE['UNDEPLOYING'])
end
end
rc = service.update()
if OpenNebula.is_error?(rc)
Log.error LOG_COMP, "Error trying to update " <<
"Service #{service.id()} : #{rc.message}"
end
}
if OpenNebula.is_error?(rc_get)
Log.error LOG_COMP, "Error getting Service " <<
"#{id}: #{rc_get.message}"
end
}
end
service.update
rc
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
# Delete service
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
#
# @return [OpenNebula::Error] Error if any
def undeploy_action(client, service_id)
rc = @srv_pool.get(service_id, client) do |service|
unless service.can_undeploy?
break OpenNebula::Error.new(
'Service cannot be undeployed in state: ' \
"#{service.state_str}"
)
end
set_deploy_strategy(service)
roles = service.roles_shutdown
# If shutdown roles is empty, asume the service is in DONE and exit
if roles.empty?
if service.all_roles_done?
service.set_state(Service::STATE['DONE'])
service.update
end
break
end
rc = undeploy_roles(client,
roles,
'UNDEPLOYING',
'FAILED_UNDEPLOYING',
false)
if !OpenNebula.is_error?(rc)
service.set_state(Service::STATE['UNDEPLOYING'])
else
service.set_state(Service::STATE['FAILED_UNDEPLOYING'])
end
service.update
rc
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
# Scale service
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
# @param role_name [String] Role to scale
# @param cardinality [Integer] Number of VMs to scale
# @param force [Boolean] True to force scaling
#
# @return [OpenNebula::Error] Error if any
def scale_action(client, service_id, role_name, cardinality, force)
rc = @srv_pool.get(service_id, client) do |service|
unless service.can_scale?
break OpenNebula::Error.new(
"Service cannot be scaled in state: #{service.state_str}"
)
end
role = service.roles[role_name]
if role.nil?
break OpenNebula::Error.new("Role #{role_name} not found")
end
rc = nil
cardinality_diff = cardinality - role.cardinality
set_cardinality(role, cardinality, force)
if cardinality_diff > 0
role.scale_way('UP')
rc = deploy_roles(client,
{ role_name => role },
'SCALING',
'FAILED_SCALING',
true)
elsif cardinality_diff < 0
role.scale_way('DOWN')
rc = undeploy_roles(client,
{ role_name => role },
'SCALING',
'FAILED_SCALING',
true)
else
break OpenNebula::Error.new(
"Cardinality of #{role_name} is already at #{cardinality}"
)
end
if !OpenNebula.is_error?(rc)
service.set_state(Service::STATE['SCALING'])
else
service.set_state(Service::STATE['FAILED_SCALING'])
end
service.update
rc
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
# Recover service
#
# @param client [OpenNebula::Client] Client executing action
# @param service_id [Integer] Service ID
#
# @return [OpenNebula::Error] Error if any
def recover_action(client, service_id)
# TODO, kill other proceses? (other recovers)
rc = @srv_pool.get(service_id, client) do |service|
if service.can_recover_deploy?
recover_deploy(client, service)
elsif service.can_recover_undeploy?
recover_undeploy(client, service)
elsif service.can_recover_scale?
recover_scale(client, service)
else
break OpenNebula::Error.new(
'Service cannot be recovered in state: ' \
"#{service.state_str}"
)
end
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
rc
end
private
############################################################################
# Callbacks
############################################################################
def deploy_cb(client, service_id, role_name)
rc = @srv_pool.get(service_id, client) do |service|
service.roles[role_name].set_state(Role::STATE['RUNNING'])
if service.all_roles_running?
service.set_state(Service::STATE['RUNNING'])
elsif service.strategy == 'straight'
set_deploy_strategy(service)
deploy_roles(client,
service.roles_deploy,
'DEPLOYING',
'FAILED_DEPLOYING',
false)
end
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def deploy_failure_cb(client, service_id, role_name)
rc = @srv_pool.get(service_id, client) do |service|
# stop actions for the service if deploy fails
@event_manager.cancel_action(service_id)
service.set_state(Service::STATE['FAILED_DEPLOYING'])
service.roles[role_name].set_state(Role::STATE['FAILED_DEPLOYING'])
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def undeploy_cb(client, service_id, role_name, nodes)
rc = @srv_pool.get(service_id, client) do |service|
service.roles[role_name].set_state(Role::STATE['DONE'])
service.roles[role_name].nodes.delete_if do |node|
!nodes[:failure].include?(node['deploy_id']) &&
nodes[:successful].include?(node['deploy_id'])
end
if service.all_roles_done?
rc = service.delete_networks
if rc && !rc.empty?
Log.info LOG_COMP, 'Error trying to delete '\
"Virtual Networks #{rc}"
end
service.set_state(Service::STATE['DONE'])
elsif service.strategy == 'straight'
set_deploy_strategy(service)
undeploy_roles(client,
service.roles_shutdown,
'UNDEPLOYING',
'FAILED_UNDEPLOYING',
false)
end
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def undeploy_failure_cb(client, service_id, role_name, nodes)
rc = @srv_pool.get(service_id, client) do |service|
# stop actions for the service if deploy fails
@event_manager.cancel_action(service_id)
service.set_state(Service::STATE['FAILED_UNDEPLOYING'])
service.roles[role_name].set_state(Role::STATE['FAILED_UNDEPLOYING'])
service.roles[role_name].nodes.delete_if do |node|
!nodes[:failure].include?(node['deploy_id']) &&
nodes[:successful].include?(node['deploy_id'])
end
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def scaleup_cb(client, service_id, role_name)
rc = @srv_pool.get(service_id, client) do |service|
service.set_state(Service::STATE['COOLDOWN'])
service.roles[role_name].set_state(Role::STATE['COOLDOWN'])
@event_manager.trigger_action(:wait_cooldown,
service.id,
client,
service.id,
role_name,
10) # TODO, config time
service.roles[role_name].clean_scale_way
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def scaledown_cb(client, service_id, role_name, nodes)
rc = @srv_pool.get(service_id, client) do |service|
service.set_state(Service::STATE['COOLDOWN'])
service.roles[role_name].set_state(Role::STATE['COOLDOWN'])
service.roles[role_name].nodes.delete_if do |node|
!nodes[:failure].include?(node['deploy_id']) &&
nodes[:successful].include?(node['deploy_id'])
end
@event_manager.trigger_action(:wait_cooldown,
service.id,
client,
service.id,
role_name,
10) # TODO, config time
service.roles[role_name].clean_scale_way
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def scaleup_failure_cb(client, service_id, role_name)
rc = @srv_pool.get(service_id, client) do |service|
# stop actions for the service if deploy fails
@event_manager.cancel_action(service_id)
service.set_state(Service::STATE['FAILED_SCALING'])
service.roles[role_name].set_state(Role::STATE['FAILED_SCALING'])
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def scaledown_failure_cb(client, service_id, role_name, nodes)
rc = @srv_pool.get(service_id, client) do |service|
# stop actions for the service if deploy fails
@event_manager.cancel_action(service_id)
role = service.roles[role_name]
service.set_state(Service::STATE['FAILED_SCALING'])
role.set_state(Role::STATE['FAILED_SCALING'])
role.nodes.delete_if do |node|
!nodes[:failure].include?(node['deploy_id']) &&
nodes[:successful].include?(node['deploy_id'])
end
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
def cooldown_cb(client, service_id, role_name)
rc = @srv_pool.get(service_id, client) do |service|
service.set_state(Service::STATE['RUNNING'])
service.roles[role_name].set_state(Role::STATE['RUNNING'])
service.update
end
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
end
############################################################################
# Helpers
############################################################################
# Iterate through the services for catching up with the state of each servic
# used when the LCM starts
def catch_up(client)
Log.error LOG_COMP, 'Catching up...'
@srv_pool.info
@srv_pool.each do |service|
recover_action(client, service.id) if service.transient_state?
sleep @sleep_time
end
end
private
# Returns the deployment strategy for the given Service
# @param [Service] service the service
# rubocop:disable Naming/AccessorMethodName
def set_deploy_strategy(service)
# rubocop:enable Naming/AccessorMethodName
# @return [Strategy] the deployment Strategy
def get_deploy_strategy(service)
strategy = Strategy.new
case service.strategy
when 'straight'
service.extend(Straight)
else
service.extend(Strategy)
strategy.extend(Straight)
end
return strategy
end
# Returns true if the deployments of all roles was fine and
# update their state consequently
# @param [Array<Role>] roles to be deployed
# @param [Role::STATE] success_state new state of the role
# if deployed successfuly
# @param [Role::STATE] error_state new state of the role
# if deployed unsuccessfuly
def deploy_roles(client, roles, success_state, error_state, scale)
if scale
action = :wait_scaleup
else
action = :wait_deploy
end
rc = roles.each do |name, role|
rc = role.deploy
if !rc[0]
role.set_state(Role::STATE[error_state])
break OpenNebula::Error.new("Error deploying role #{name}")
end
role.set_state(Role::STATE[success_state])
@event_manager.trigger_action(action,
role.service.id,
client,
role.service.id,
role.name,
rc[0])
end
rc
end
def undeploy_roles(client, roles, success_state, error_state, scale)
if scale
action = :wait_scaledown
else
action = :wait_undeploy
end
rc = roles.each do |name, role|
rc = role.shutdown(false)
if !rc[0]
role.set_state(Role::STATE[error_state])
break OpenNebula::Error.new("Error undeploying role #{name}")
end
role.set_state(Role::STATE[success_state])
# TODO, take only the subset of nodes which need to be undeployed (new role.nodes_undeployed_ids ?)
@event_manager.trigger_action(action,
role.service.id,
client,
role.service.id,
role.name,
rc[0])
end
rc
end
def set_cardinality(role, cardinality, force)
tmpl_json = "{ \"cardinality\" : #{cardinality},\n" \
" \"force\" : #{force} }"
rc = role.update(JSON.parse(tmpl_json))
return rc if OpenNebula.is_error?(rc)
nil
end
def recover_deploy(client, service)
service.roles.each do |name, role|
next unless role.can_recover_deploy?
nodes = role.recover_deploy
@event_manager.trigger_action(:wait_deploy,
service.id,
client,
service.id,
name,
nodes)
end
end
def recover_undeploy(client, service)
service.roles.each do |name, role|
next unless role.can_recover_undeploy?
nodes = role.recover_undeploy
@event_manager.trigger_action(:wait_undeploy,
service.id,
client,
service.id,
name,
nodes)
end
end
def recover_scale(client, service)
service.roles.each do |name, role|
next unless role.can_recover_scale?
nodes, up = role.recover_scale
if up
action = :wait_scaleup
else
action = :wait_scaledown
end
@event_manager.trigger_action(action,
service.id,
client,
service.id,
name,
nodes)
end
end
end

View File

@ -19,33 +19,29 @@ require 'treetop/version'
require 'grammar'
require 'parse-cron'
if !(Gem::Version.create('1.6.3') < Gem.loaded_specs['treetop'].version)
raise 'treetop gem version must be >= 1.6.3.'\
"Current version is #{Treetop::VERSION::STRING}"
if !(Gem::Version.create(Treetop::VERSION::STRING) >= Gem::Version.create('1.6.3'))
raise "treetop gem version must be >= 1.6.3. Current version is #{Treetop::VERSION::STRING}"
end
module OpenNebula
# Service Role class
class Role
attr_reader :service
# Actions that can be performed on the VMs of a given Role
SCHEDULE_ACTIONS = %w[
terminate
terminate-hard
undeploy
undeploy-hard
hold
release
stop
suspend
resume
reboot
reboot-hard
poweroff
poweroff-hard
snapshot-create
SCHEDULE_ACTIONS = [
'terminate',
'terminate-hard',
'undeploy',
'undeploy-hard',
'hold',
'release',
'stop',
'suspend',
'resume',
'reboot',
'reboot-hard',
'poweroff',
'poweroff-hard',
'snapshot-create'
]
STATE = {
@ -62,45 +58,21 @@ module OpenNebula
'COOLDOWN' => 10
}
STATE_STR = %w[
PENDING
DEPLOYING
RUNNING
UNDEPLOYING
WARNING
DONE
FAILED_UNDEPLOYING
FAILED_DEPLOYING
SCALING
FAILED_SCALING
COOLDOWN
STATE_STR = [
'PENDING',
'DEPLOYING',
'RUNNING',
'UNDEPLOYING',
'WARNING',
'DONE',
'FAILED_UNDEPLOYING',
'FAILED_DEPLOYING',
'SCALING',
'FAILED_SCALING',
'COOLDOWN'
]
RECOVER_DEPLOY_STATES = %w[
FAILED_DEPLOYING
DEPLOYING
PENDING
]
RECOVER_UNDEPLOY_STATES = %w[
FAILED_UNDEPLOYING
UNDEPLOYING
]
RECOVER_SCALE_STATES = %w[
FAILED_SCALING
SCALING
]
SCALE_WAYS = {
'UP' => 0,
'DOWN' => 1
}
# VM information to save in document
VM_INFO = %w[ID UID GID UNAME GNAME NAME]
LOG_COMP = 'ROL'
LOG_COMP = "ROL"
def initialize(body, service)
@body = body
@ -111,73 +83,33 @@ module OpenNebula
end
def name
@body['name']
return @body['name']
end
# Returns the role state
# @return [Integer] the role state
def state
@body['state'].to_i
end
def can_recover_deploy?
return RECOVER_DEPLOY_STATES.include? STATE_STR[state] if state != STATE['PENDING']
parents.each do |parent|
return false if @service.roles[parent].state != STATE['RUNNING']
end
true
end
def can_recover_undeploy?
if !RECOVER_UNDEPLOY_STATES.include? STATE_STR[state]
# TODO, check childs if !empty? check if can be undeployed
@service.roles.each do |role_name, role|
next if role_name == name
if role.parents.include? name
return false if role.state != STATE['DONE']
end
end
end
true
end
def can_recover_scale?
return false unless RECOVER_SCALE_STATES.include? STATE_STR[state]
true
return @body['state'].to_i
end
# Returns the role parents
# @return [Array] the role parents
def parents
@body['parents'] || []
return @body['parents'] || []
end
# Returns the role cardinality
# @return [Integer] the role cardinality
def cardinality
@body['cardinality'].to_i
return @body['cardinality'].to_i
end
# Sets a new cardinality for this role
# @param [Integer] the new cardinality
# rubocop:disable Naming/AccessorMethodName
def set_cardinality(target_cardinality)
# rubocop:enable Naming/AccessorMethodName
if target_cardinality > cardinality
dir = 'up'
else
dir = 'down'
end
msg = "Role #{name} scaling #{dir} from #{cardinality} to "\
"#{target_cardinality} nodes"
Log.info LOG_COMP, msg, @service.id
dir = target_cardinality > cardinality ? "up" : "down"
msg = "Role #{name} scaling #{dir} from #{cardinality} to #{target_cardinality} nodes"
Log.info LOG_COMP, msg, @service.id()
@service.log_info(msg)
@body['cardinality'] = target_cardinality.to_i
@ -185,7 +117,7 @@ module OpenNebula
# Updates the cardinality with the current number of nodes
def update_cardinality()
@body['cardinality'] = @body['nodes'].size
@body['cardinality'] = @body['nodes'].size()
end
# Returns the role max cardinality
@ -193,9 +125,9 @@ module OpenNebula
def max_cardinality
max = @body['max_vms']
return if max.nil?
return nil if max.nil?
max.to_i
return max.to_i
end
# Returns the role min cardinality
@ -203,33 +135,27 @@ module OpenNebula
def min_cardinality
min = @body['min_vms']
return if min.nil?
return nil if min.nil?
min.to_i
return min.to_i
end
# Returns the string representation of the service state
# @return [String] the state string
def state_str
STATE_STR[state]
return STATE_STR[state]
end
# Returns the nodes of the role
# @return [Array] the nodes
def nodes
def get_nodes
@body['nodes']
end
def nodes_ids
@body['nodes'].map {|node| node['deploy_id'] }
end
# Sets a new state
# @param [Integer] the new state
# @return [true, false] true if the value was changed
# rubocop:disable Naming/AccessorMethodName
def set_state(state)
# rubocop:enable Naming/AccessorMethodName
if state < 0 || state > STATE_STR.size
return false
end
@ -247,19 +173,9 @@ module OpenNebula
end
end
Log.info LOG_COMP,
"Role #{name} new state: #{STATE_STR[state]}",
@service.id
Log.info LOG_COMP, "Role #{name} new state: #{STATE_STR[state]}", @service.id()
true
end
def scale_way(way)
@body['scale_way'] = SCALE_WAYS[way]
end
def clean_scale_way
@body.delete('scale_way')
return true
end
# Retrieves the VM information for each Node in this Role. If a Node
@ -280,9 +196,8 @@ module OpenNebula
rc = vm.info
if OpenNebula.is_error?(rc)
msg = "Role #{name} : VM #{vm_id} "\
"monitorization failed; #{rc.message}"
Log.error LOG_COMP, msg, @service.id
msg = "Role #{name} : VM #{vm_id} monitorization failed; #{rc.message}"
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
success = false
@ -300,7 +215,7 @@ module OpenNebula
if running && @service.ready_status_gate
running_status = node['vm_info']['VM']['USER_TEMPLATE']['READY'] || ""
running = running_status.upcase == 'YES'
running = running_status.upcase == "YES"
end
node['running'] = running
@ -323,7 +238,7 @@ module OpenNebula
@body['nodes'] = new_nodes
if !success
return OpenNebula::Error.new
return OpenNebula::Error.new()
end
return nil
@ -333,11 +248,8 @@ module OpenNebula
# @return [Array<true, nil>, Array<false, String>] true if all the VMs
# were created, false and the error reason if there was a problem
# creating the VMs
def deploy
deployed_nodes = []
n_nodes = cardinality - nodes.size
return [deployed_nodes, nil] if n_nodes == 0
def deploy(scale_up=false)
n_nodes = cardinality() - get_nodes.size
@body['last_vmname'] ||= 0
@ -350,90 +262,79 @@ module OpenNebula
# If the extra_template contains APPEND="<attr1>,<attr2>", it
# will add the attributes that already exist in the template,
# instead of replacing them.
append = extra_template
.match(/^\s*APPEND=\"?(.*?)\"?\s*$/)[1]
.split(',') rescue nil
append = extra_template.match(/^\s*APPEND=\"?(.*?)\"?\s*$/)[1].split(",") rescue nil
if append && !append.empty?
rc = template.info
if OpenNebula.is_error?(rc)
msg = "Role #{name} : Info template #{template_id};"\
" #{rc.message}"
msg = "Role #{name} : Info template #{template_id}; #{rc.message}"
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
return [false, 'Error fetching Info to instantiate the'\
" VM Template #{template_id} in Role "\
"#{name}: #{rc.message}"]
return [false, "Error fetching Info to instantiate the VM Template" \
" #{template_id} in Role #{self.name}: #{rc.message}"]
end
et = template.template_like_str('TEMPLATE',
true,
append.join('|'))
et = template.template_like_str("TEMPLATE",
true, append.join("|"))
et = et << "\n" << extra_template
extra_template = et
end
else
extra_template = ''
extra_template = ""
end
extra_template <<
"\nSERVICE_ID = #{@service.id}" \
"\nSERVICE_ID = #{@service.id()}" <<
"\nROLE_NAME = \"#{@body['name']}\""
n_nodes.times do
vm_name = @@vm_name_template
.gsub('$SERVICE_ID', @service.id.to_s)
.gsub('$SERVICE_NAME', @service.name.to_s)
.gsub('$ROLE_NAME', name.to_s)
.gsub('$VM_NUMBER', @body['last_vmname'].to_s)
n_nodes.times { |i|
vm_name = @@vm_name_template.
gsub("$SERVICE_ID", @service.id().to_s).
gsub("$SERVICE_NAME", @service.name().to_s).
gsub("$ROLE_NAME", name().to_s).
gsub("$VM_NUMBER", @body['last_vmname'].to_s)
@body['last_vmname'] += 1
Log.debug LOG_COMP, "Role #{name} : Trying to instantiate "\
"template #{template_id}, with name #{vm_name}", @service.id
Log.debug LOG_COMP, "Role #{name} : Trying to instantiate template "\
"#{template_id}, with name #{vm_name}", @service.id()
vm_id = template.instantiate(vm_name, false, extra_template)
deployed_nodes << vm_id
if OpenNebula.is_error?(vm_id)
msg = "Role #{name} : Instantiate failed for template "\
"#{template_id}; #{vm_id.message}"
Log.error LOG_COMP, msg, @service.id
msg = "Role #{name} : Instantiate failed for template #{template_id}; #{vm_id.message}"
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
return [false, 'Error trying to instantiate the VM ' \
"Template #{template_id} in Role " \
"#{name}: #{vm_id.message}"]
return [false, "Error trying to instantiate the VM Template" \
" #{template_id} in Role #{self.name}: #{vm_id.message}"]
end
Log.debug LOG_COMP, "Role #{name} : Instantiate success,"\
" VM ID #{vm_id}", @service.id
Log.debug LOG_COMP, "Role #{name} : Instantiate success, VM ID #{vm_id}", @service.id()
node = {
'deploy_id' => vm_id
'deploy_id' => vm_id,
}
vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
@service.client)
vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
rc = vm.info
if OpenNebula.is_error?(rc)
node['vm_info'] = nil
else
hash_vm = vm.to_hash['VM']
vm_info = {}
vm_info['VM'] = hash_vm.select {|v| VM_INFO.include?(v) }
node['vm_info'] = vm.to_hash
end
node['vm_info'] = vm_info
if scale_up
node['scale_up'] = '1'
end
@body['nodes'] << node
end
}
[deployed_nodes, nil]
return [true, nil]
end
# Terminate all the nodes in this role
@ -443,25 +344,27 @@ module OpenNebula
# @return [Array<true, nil>, Array<false, String>] true if all the VMs
# were terminated, false and the error reason if there was a problem
# shutting down the VMs
def shutdown(recover)
if nodes.size != cardinality
n_nodes = nodes.size - cardinality
def shutdown(scale_down=false)
success = true
nodes = get_nodes
if scale_down
n_nodes = nodes.size - cardinality()
else
n_nodes = nodes.size
end
rc = shutdown_nodes(nodes, n_nodes, recover)
shutdown_nodes(nodes[0..n_nodes-1], scale_down)
return [false, "Error undeploying nodes for role #{id}"] unless rc[0]
[rc[1], nil]
return [success, nil]
end
# Delete all the nodes in this role
# @return [Array<true, nil>] All the VMs are deleted, and the return
# ignored
def delete
nodes.each do |node|
get_nodes.each { |node|
vm_id = node['deploy_id']
Log.debug LOG_COMP, "Role #{name} : Deleting VM #{vm_id}", @service.id()
@ -479,13 +382,12 @@ module OpenNebula
msg = "Role #{name} : Delete failed for VM #{vm_id}; #{rc.message}"
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
set_state(Role::STATE['FAILED_DELETING'])
else
Log.debug LOG_COMP, "Role #{name} : Delete success for VM #{vm_id}", @service.id()
end
end
}
[true, nil]
return [true, nil]
end
# Changes the owner/group of all the nodes in this role
@ -497,7 +399,7 @@ module OpenNebula
# were updated, false and the error reason if there was a problem
# updating the VMs
def chown(uid, gid)
nodes.each { |node|
get_nodes.each { |node|
vm_id = node['deploy_id']
Log.debug LOG_COMP, "Role #{name} : Chown for VM #{vm_id}", @service.id()
@ -516,7 +418,7 @@ module OpenNebula
end
}
[true, nil]
return [true, nil]
end
# Schedule the given action on all the VMs that belong to the Role
@ -524,41 +426,34 @@ module OpenNebula
# @param [Integer] period
# @param [Integer] vm_per_period
def batch_action(action, period, vms_per_period)
vms_id = []
error_msgs = []
nodes = @body['nodes']
now = Time.now.to_i
vms_id = []
# TODO: check action is a valid string, period vm_per_period integer
error_msgs = []
nodes = @body['nodes']
now = Time.now.to_i
do_offset = ( !period.nil? && period.to_i > 0 &&
!vms_per_period.nil? && vms_per_period.to_i > 0 )
time_offset = 0
# if role is done, return error
if state == 5
return OpenNebula::Error.new("Role #{name} is in DONE state")
end
do_offset = (!period.nil? && period.to_i > 0 &&
!vms_per_period.nil? && vms_per_period.to_i > 0)
nodes.each_with_index do |node, index|
vm_id = node['deploy_id']
vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
@service.client)
vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
rc = vm.info
if OpenNebula.is_error?(rc)
msg = "Role #{name} : VM #{vm_id} monitorization failed;"\
" #{rc.message}"
msg = "Role #{name} : VM #{vm_id} monitorization failed; #{rc.message}"
error_msgs << msg
Log.error LOG_COMP, msg, @service.id
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
else
ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')
id = 0
if !ids.nil? && !ids.empty?
if (!ids.nil? && !ids.empty?)
ids.map! {|e| e.to_i }
id = ids.max + 1
end
@ -566,22 +461,17 @@ module OpenNebula
tmp_str = vm.user_template_str
if do_offset
offset = (index / vms_per_period.to_i).floor
time_offset = offset * period.to_i
time_offset = (index / vms_per_period.to_i).floor * period.to_i
end
tmp_str << "\nSCHED_ACTION = [ID = #{id},ACTION = "\
"#{action}, TIME = #{now + time_offset}]"
tmp_str << "\nSCHED_ACTION = "<<
"[ID = #{id}, ACTION = #{action}, TIME = #{now + time_offset}]"
rc = vm.update(tmp_str)
if OpenNebula.is_error?(rc)
msg = "Role #{name} : VM #{vm_id} error scheduling "\
"action; #{rc.message}"
msg = "Role #{name} : VM #{vm_id} error scheduling action; #{rc.message}"
error_msgs << msg
Log.error LOG_COMP, msg, @service.id
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
else
vms_id << vm.id
@ -589,16 +479,15 @@ module OpenNebula
end
end
log_msg = "Action:#{action} scheduled on Role:#{name}"\
"VMs:#{vms_id.join(',')}"
log_msg = "Action:#{action} scheduled on Role:#{self.name} VMs:#{vms_id.join(',')}"
Log.info LOG_COMP, log_msg, @service.id()
Log.info LOG_COMP, log_msg, @service.id
return [true, log_msg] if error_msgs.empty?
error_msgs << log_msg
[false, error_msgs.join('\n')]
if error_msgs.empty?
return [true, log_msg]
else
error_msgs << log_msg
return [false, error_msgs.join('\n')]
end
end
# Returns true if the VM state is failure
@ -670,6 +559,29 @@ module OpenNebula
return [0, 0]
end
# Scales up or down the number of nodes needed to match the current
# cardinality
#
# @return [Array<true, nil>, Array<false, String>] true if all the VMs
# were created/shut down, false and the error reason if there
# was a problem
def scale()
n_nodes = 0
get_nodes.each do |node|
n_nodes += 1 if node['disposed'] != "1"
end
diff = cardinality - n_nodes
if diff > 0
return deploy(true)
elsif diff < 0
return shutdown(true)
end
return [true, nil]
end
# Updates the duration for the next cooldown
# @param cooldown_duration [Integer] duration for the next cooldown
@ -727,98 +639,39 @@ module OpenNebula
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def update(template)
force = template['force'] == true
new_cardinality = template['cardinality']
return if new_cardinality.nil?
force = template['force'] == true
new_cardinality = template["cardinality"]
if new_cardinality.nil?
return nil
end
new_cardinality = new_cardinality.to_i
if !force
if new_cardinality < min_cardinality.to_i
if new_cardinality < min_cardinality().to_i
return OpenNebula::Error.new(
"Minimum cardinality is #{min_cardinality}"
)
"Minimum cardinality is #{min_cardinality()}")
elsif !max_cardinality.nil? &&
new_cardinality > max_cardinality.to_i
elsif !max_cardinality().nil? && new_cardinality > max_cardinality().to_i
return OpenNebula::Error.new(
"Maximum cardinality is #{max_cardinality}"
)
"Maximum cardinality is #{max_cardinality()}")
end
end
set_cardinality(new_cardinality)
nil
return nil
end
########################################################################
# Recover
########################################################################
def recover_deploy
nodes = @body['nodes']
deployed_nodes = []
nodes.each do |node|
vm_id = node['deploy_id']
vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
@service.client)
rc = vm.info
if OpenNebula.is_error?(rc)
msg = "Role #{name} : Retry failed for VM "\
"#{vm_id}; #{rc.message}"
Log.error LOG_COMP, msg, @service.id
next true
end
vm_state = vm.state
lcm_state = vm.lcm_state
next false if vm_state == 3 && lcm_state == 3 # ACTIVE/RUNNING
next true if vm_state == '6' # Delete DONE nodes
if Role.vm_failure?(vm_state, lcm_state)
rc = vm.recover(2)
if OpenNebula.is_error?(rc)
msg = "Role #{name} : Retry failed for VM "\
"#{vm_id}; #{rc.message}"
Log.error LOG_COMP, msg, @service.id
@service.log_error(msg)
else
deployed_nodes << vm_id
end
else
vm.resume
deployed_nodes << vm_id
end
end
rc = deploy
deployed_nodes.concat(rc[0]) if rc[1].nil?
deployed_nodes
end
def recover_undeploy
undeployed_nodes = []
rc = shutdown(true)
undeployed_nodes.concat(rc[0]) if rc[1].nil?
undeployed_nodes
def recover_deployment()
recover()
end
def recover_warning()
@ -826,120 +679,16 @@ module OpenNebula
deploy()
end
def recover_scale
rc = nil
if @body['scale_way'] == SCALE_WAYS['UP']
rc = [recover_deploy, true]
elsif @body['scale_way'] == SCALE_WAYS['DOWN']
rc = [recover_undeploy, false]
end
rc
def recover_scale()
recover()
retry_scale()
end
########################################################################
# Nodes info
########################################################################
# Determine if the role nodes are running
# @return [true|false]
def nodes_running?
if nodes.size != cardinality
return false
end
nodes.each do |node|
return false unless node && node['running']
end
true
end
# Returns true if any VM is in UNKNOWN or FAILED
# @return [true|false]
def nodes_warning?
nodes.each do |node|
next unless node && node['vm_info']
vm_state = node['vm_info']['VM']['STATE']
lcm_state = node['vm_info']['VM']['LCM_STATE']
# Failure or UNKNOWN
if vm_failure?(node) || (vm_state == '3' && lcm_state == '16')
return true
end
end
false
end
def nodes_done?
nodes.each do |node|
if node && node['vm_info']
vm_state = node['vm_info']['VM']['STATE']
if vm_state != '6' # DONE
return false
end
else
return false
end
end
true
end
# Determine if any of the role nodes failed
# @param [Role] role
# @return [true|false]
def any_node_failed?
nodes.each do |node|
if vm_failure?(node)
return true
end
end
false
end
# Determine if any of the role nodes failed to scale
# @return [true|false]
def any_node_failed_scaling?
nodes.each do |node|
if node && node['vm_info'] &&
(node['disposed'] == '1' || node['scale_up'] == '1') &&
vm_failure?(node)
return true
end
end
false
end
def role_finished_scaling?
nodes.each { |node|
# For scale up, check new nodes are running, or past running
if node
if node['scale_up'] == '1'
return false if !node['running']
end
else
return false
end
}
# TODO: If a shutdown ends in running again (VM doesn't have acpi),
# the role/service will stay in SCALING
# For scale down, it will finish when scaling nodes are deleted
return nodes.size() == cardinality()
end
########################################################################
########################################################################
private
# Returns a positive, 0, or negative number of nodes to adjust,
@ -1131,13 +880,13 @@ module OpenNebula
# For a failed scale up, the cardinality is updated to the actual value
# For a failed scale down, the shutdown actions are retried
def retry_scale()
nodes_dispose = nodes.select { |node|
nodes_dispose = get_nodes.select { |node|
node['disposed'] == "1"
}
shutdown_nodes(nodes_dispose, true)
set_cardinality(nodes.size - nodes_dispose.size)
set_cardinality( get_nodes.size() - nodes_dispose.size() )
end
# Deletes VMs in DONE or FAILED, and sends a resume action to VMs in UNKNOWN
@ -1194,53 +943,41 @@ module OpenNebula
@body['nodes'] = new_nodes
end
# Shuts down all the given nodes
# @param scale_down [true,false] True to set the 'disposed' node flag
def shutdown_nodes(nodes, n_nodes, recover)
success = true
undeployed_nodes = []
def shutdown_nodes(nodes, scale_down)
action = @body['shutdown_action']
if action.nil?
action = @service.shutdown_action
action = @service.get_shutdown_action()
end
if action.nil?
action = @@default_shutdown
end
nodes[0..n_nodes - 1].each do |node|
nodes.each { |node|
vm_id = node['deploy_id']
Log.debug(LOG_COMP,
"Role #{name} : Terminating VM #{vm_id}",
@service.id)
Log.debug LOG_COMP, "Role #{name} : Terminating VM #{vm_id}", @service.id()
vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
@service.client)
vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
vm_state = nil
lcm_state = nil
if recover
vm.info
vm_state = vm.state
lcm_state = vm.lcm_state
end
if recover && Role.vm_failure?(vm_state, lcm_state)
rc = vm.recover(2)
elsif action == 'terminate-hard'
if action == 'terminate-hard'
rc = vm.terminate(true)
else
rc = vm.terminate
end
if scale_down
node['disposed'] = '1'
end
if OpenNebula.is_error?(rc)
msg = "Role #{name} : Terminate failed for VM #{vm_id}, will perform a Delete; #{rc.message}"
Log.error LOG_COMP, msg, @service.id
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
if action != 'terminate-hard'
@ -1253,38 +990,19 @@ module OpenNebula
if OpenNebula.is_error?(rc)
msg = "Role #{name} : Delete failed for VM #{vm_id}; #{rc.message}"
Log.error LOG_COMP, msg, @service.id
Log.error LOG_COMP, msg, @service.id()
@service.log_error(msg)
success = false
#return [false, rc.message]
else
Log.debug(LOG_COMP,
"Role #{name} : Delete success for VM #{vm_id}",
@service.id)
undeployed_nodes << vm_id
Log.debug LOG_COMP, "Role #{name} : Delete success for VM #{vm_id}", @service.id()
end
else
Log.debug(LOG_COMP,
"Role #{name}: Terminate success for VM #{vm_id}",
@service.id)
undeployed_nodes << vm_id
Log.debug LOG_COMP, "Role #{name} : Terminate success for VM #{vm_id}", @service.id()
end
end
[success, undeployed_nodes]
end
def vm_failure?(node)
if node && node['vm_info']
return Role.vm_failure?(
vm_state = node['vm_info']['VM']['STATE'],
lcm_state = node['vm_info']['VM']['LCM_STATE'])
end
false
}
end
end
end

View File

@ -15,12 +15,8 @@
#--------------------------------------------------------------------------- #
module OpenNebula
# Service class as wrapper of DocumentJSON
class Service < DocumentJSON
attr_reader :roles, :client
DOCUMENT_TYPE = 100
STATE = {
@ -37,108 +33,44 @@ module OpenNebula
'COOLDOWN' => 10
}
STATE_STR = %w[
PENDING
DEPLOYING
RUNNING
UNDEPLOYING
WARNING
DONE
FAILED_UNDEPLOYING
FAILED_DEPLOYING
SCALING
FAILED_SCALING
COOLDOWN
STATE_STR = [
'PENDING',
'DEPLOYING',
'RUNNING',
'UNDEPLOYING',
'WARNING',
'DONE',
'FAILED_UNDEPLOYING',
'FAILED_DEPLOYING',
'SCALING',
'FAILED_SCALING',
'COOLDOWN'
]
TRANSIENT_STATES = %w[
DEPLOYING
UNDEPLOYING
SCALING
]
FAILED_STATES = %w[
FAILED_DEPLOYING
FAILED_UNDEPLOYING
FAILED_SCALING
]
RECOVER_DEPLOY_STATES = %w[
FAILED_DEPLOYING
DEPLOYING
PENDING
]
RECOVER_UNDEPLOY_STATES = %w[
FAILED_UNDEPLOYING
UNDEPLOYING
]
RECOVER_SCALE_STATES = %w[
FAILED_SCALING
SCALING
]
LOG_COMP = 'SER'
LOG_COMP = "SER"
# Returns the service state
# @return [Integer] the service state
def state
@body['state'].to_i
return @body['state'].to_i
end
# Returns the service strategy
# @return [String] the service strategy
def strategy
@body['deployment']
return @body['deployment']
end
# Returns the string representation of the service state
# @return the state string
def state_str
STATE_STR[state]
end
# Returns true if the service is in transient state
# @return true if the service is in transient state, false otherwise
def transient_state?
TRANSIENT_STATES.include? STATE_STR[state]
end
# Return true if the service is in failed state
# @return true if the service is in failed state, false otherwise
def failed_state?
FAILED_STATES.include? STATE_STR[state]
end
# Return true if the service can be undeployed
# @return true if the service can be undeployed, false otherwise
def can_undeploy?
if transient_state?
state != Service::STATE['UNDEPLOYING']
else
state != Service::STATE['DONE'] && !failed_state?
end
end
def can_recover_deploy?
RECOVER_DEPLOY_STATES.include? STATE_STR[state]
end
def can_recover_undeploy?
RECOVER_UNDEPLOY_STATES.include? STATE_STR[state]
end
def can_recover_scale?
RECOVER_SCALE_STATES.include? STATE_STR[state]
return STATE_STR[state]
end
# Sets a new state
# @param [Integer] the new state
# @return [true, false] true if the value was changed
# rubocop:disable Naming/AccessorMethodName
def set_state(state)
# rubocop:enable Naming/AccessorMethodName
if state < 0 || state > STATE_STR.size
return false
end
@ -146,16 +78,16 @@ module OpenNebula
@body['state'] = state
msg = "New state: #{STATE_STR[state]}"
Log.info LOG_COMP, msg, id
log_info(msg)
Log.info LOG_COMP, msg, self.id()
self.log_info(msg)
true
return true
end
# Returns the owner username
# @return [String] the service's owner username
def owner_name
self['UNAME']
def owner_name()
return self['UNAME']
end
# Replaces this object's client with a new one
@ -164,82 +96,86 @@ module OpenNebula
@client = owner_client
end
# Returns all the node Roles
# @return [Hash<String,Role>] all the node Roles
def get_roles
return @roles
end
# Returns true if all the nodes are correctly deployed
# @return [true, false] true if all the nodes are correctly deployed
def all_roles_running?
@roles.each do |_name, role|
def all_roles_running?()
@roles.each { |name, role|
if role.state != Role::STATE['RUNNING']
return false
end
end
}
true
return true
end
# Returns true if all the nodes are in done state
# @return [true, false] true if all the nodes are correctly deployed
def all_roles_done?
@roles.each do |_name, role|
def all_roles_done?()
@roles.each { |name, role|
if role.state != Role::STATE['DONE']
return false
end
end
}
true
return true
end
# Returns true if any of the roles is in failed state
# @return [true, false] true if any of the roles is in failed state
def any_role_failed?
def any_role_failed?()
failed_states = [
Role::STATE['FAILED_DEPLOYING'],
Role::STATE['FAILED_UNDEPLOYING'],
Role::STATE['FAILED_DELETING']
]
Role::STATE['FAILED_UNDEPLOYING']]
@roles.each do |_name, role|
@roles.each { |name, role|
if failed_states.include?(role.state)
return true
end
end
}
false
return false
end
# Returns the running_status_vm option
# @return [true, false] true if the running_status_vm option is enabled
def ready_status_gate
@body['ready_status_gate']
return @body['ready_status_gate']
end
def any_role_scaling?
@roles.each do |_name, role|
def any_role_scaling?()
@roles.each do |name, role|
if role.state == Role::STATE['SCALING']
return true
end
end
false
return false
end
def any_role_failed_scaling?
@roles.each do |_name, role|
def any_role_failed_scaling?()
@roles.each do |name, role|
if role.state == Role::STATE['FAILED_SCALING']
return true
end
end
false
return false
end
def any_role_cooldown?
@roles.each do |_name, role|
def any_role_cooldown?()
@roles.each do |name, role|
if role.state == Role::STATE['COOLDOWN']
return true
end
end
false
return false
end
# Create a new service based on the template provided
@ -251,94 +187,95 @@ module OpenNebula
template['state'] = STATE['PENDING']
if template['roles']
template['roles'].each do |elem|
template['roles'].each { |elem|
elem['state'] ||= Role::STATE['PENDING']
end
}
end
super(template.to_json, template['name'])
end
# Shutdown the service. This action is called when user wants to shutdwon
# the Service
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def shutdown
if ![Service::STATE['FAILED_SCALING'],
Service::STATE['DONE']].include?(self.state)
self.set_state(Service::STATE['UNDEPLOYING'])
return self.update
else
return OpenNebula::Error.new("Action shutdown: Wrong state" \
" #{self.state_str()}")
end
end
# Recover a failed service.
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def recover
if [Service::STATE['FAILED_DEPLOYING']].include?(state)
@roles.each do |_name, role|
if [Service::STATE['FAILED_DEPLOYING']].include?(self.state)
@roles.each do |name, role|
if role.state == Role::STATE['FAILED_DEPLOYING']
role.set_state(Role::STATE['PENDING'])
role.recover_deployment()
end
end
set_state(Service::STATE['DEPLOYING'])
self.set_state(Service::STATE['DEPLOYING'])
elsif state == Service::STATE['FAILED_SCALING']
@roles.each do |_name, role|
elsif self.state == Service::STATE['FAILED_SCALING']
@roles.each do |name, role|
if role.state == Role::STATE['FAILED_SCALING']
role.recover_scale()
role.set_state(Role::STATE['SCALING'])
end
end
set_state(Service::STATE['SCALING'])
self.set_state(Service::STATE['SCALING'])
elsif state == Service::STATE['FAILED_UNDEPLOYING']
@roles.each do |_name, role|
elsif self.state == Service::STATE['FAILED_UNDEPLOYING']
@roles.each do |name, role|
if role.state == Role::STATE['FAILED_UNDEPLOYING']
role.set_state(Role::STATE['RUNNING'])
end
end
set_state(Service::STATE['UNDEPLOYING'])
self.set_state(Service::STATE['UNDEPLOYING'])
elsif state == Service::STATE['COOLDOWN']
@roles.each do |_name, role|
elsif self.state == Service::STATE['COOLDOWN']
@roles.each do |name, role|
if role.state == Role::STATE['COOLDOWN']
role.set_state(Role::STATE['RUNNING'])
end
end
set_state(Service::STATE['RUNNING'])
self.set_state(Service::STATE['RUNNING'])
elsif state == Service::STATE['WARNING']
@roles.each do |_name, role|
elsif self.state == Service::STATE['WARNING']
@roles.each do |name, role|
if role.state == Role::STATE['WARNING']
role.recover_warning
role.recover_warning()
end
end
else
OpenNebula::Error.new('Action recover: Wrong state' \
" #{state_str}")
return OpenNebula::Error.new("Action recover: Wrong state" \
" #{self.state_str()}")
end
return self.update
end
# Delete the service. All the VMs are also deleted from OpenNebula.
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def delete
networks = JSON.parse(self['TEMPLATE/BODY'])['networks_values']
@roles.each { |name, role|
role.delete()
}
networks.each do |net|
next unless net[net.keys[0]].key? 'template_id'
net_id = net[net.keys[0]]['id'].to_i
rc = OpenNebula::VirtualNetwork
.new_with_id(net_id, @client).delete
if OpenNebula.is_error?(rc)
log_info("Error deleting vnet #{net_id}: #{rc}")
end
end
super()
end
def delete_roles
@roles.each do |_name, role|
role.set_state(Role::STATE['DELETING'])
role.delete
end
return super()
end
# Retrieves the information of the Service and all its Nodes.
@ -354,14 +291,14 @@ module OpenNebula
@roles = {}
if @body['roles']
@body['roles'].each do |elem|
@body['roles'].each { |elem|
elem['state'] ||= Role::STATE['PENDING']
role = Role.new(elem, self)
@roles[role.name] = role
end
}
end
nil
return nil
end
# Add an info message in the service information that will be stored
@ -378,10 +315,15 @@ module OpenNebula
add_log(Logger::ERROR, message)
end
# Retrieve the service client
def client
@client
end
# Changes the owner/group
#
# @param [Integer] uid the new owner id. Use -1 to leave the current one
# @param [Integer] gid the new group id. Use -1 to leave the current one
# @param [Integer] uid the new owner id. Set to -1 to leave the current one
# @param [Integer] gid the new group id. Set to -1 to leave the current one
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
@ -395,28 +337,26 @@ module OpenNebula
return rc
end
@roles.each do |_name, role|
@roles.each { |name, role|
rc = role.chown(uid, gid)
break if rc[0] == false
end
}
if rc[0] == false
log_error('Chown operation failed, will try to rollback ' \
'all VMs to the old user and group')
update
self.log_error("Chown operation failed, will try to rollback all VMs to the old user and group")
update()
super(old_uid, old_gid)
@roles.each do |_name, role|
@roles.each { |name, role|
role.chown(old_uid, old_gid)
end
}
return OpenNebula::Error.new(rc[1])
end
nil
return nil
end
# Updates a role
@ -425,11 +365,10 @@ module OpenNebula
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def update_role(role_name, template_json)
if ![Service::STATE['RUNNING'], Service::STATE['WARNING']]
.include?(state)
return OpenNebula::Error.new('Update role: Wrong state' \
" #{state_str}")
if ![Service::STATE['RUNNING'], Service::STATE['WARNING']].include?(self.state)
return OpenNebula::Error.new("Update role: Wrong state" \
" #{self.state_str()}")
end
template = JSON.parse(template_json)
@ -439,8 +378,7 @@ module OpenNebula
role = @roles[role_name]
if role.nil?
return OpenNebula::Error.new("ROLE \"#{role_name}\" " \
'does not exist')
return OpenNebula::Error.new("ROLE \"#{role_name}\" does not exist")
end
rc = role.update(template)
@ -454,15 +392,15 @@ module OpenNebula
role.set_state(Role::STATE['SCALING'])
role.set_default_cooldown_duration
role.set_default_cooldown_duration()
set_state(Service::STATE['SCALING'])
self.set_state(Service::STATE['SCALING'])
update
return self.update
end
def shutdown_action
@body['shutdown_action']
def get_shutdown_action()
return @body['shutdown_action']
end
# Replaces the template contents
@ -473,7 +411,7 @@ module OpenNebula
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def update(template_json = nil, append = false)
def update(template_json=nil, append=false)
if template_json
template = JSON.parse(template_json)
@ -501,93 +439,22 @@ module OpenNebula
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def update_raw(template_raw, append = false)
def update_raw(template_raw, append=false)
super(template_raw, append)
end
def deploy_networks
body = JSON.parse(self['TEMPLATE/BODY'])
return if body['networks_values'].nil?
body['networks_values'].each do |net|
rc = create_vnet(net) if net[net.keys[0]].key?('template_id')
if OpenNebula.is_error?(rc)
return rc
end
rc = reserve(net) if net[net.keys[0]].key?('reserve_from')
if OpenNebula.is_error?(rc)
return rc
end
end
# Replace $attibute by the corresponding value
resolve_attributes(body)
# @body = template.to_hash
update_body(body)
end
def delete_networks
vnets = @body['networks_values']
vnets_failed = []
return if vnets.nil?
vnets.each do |vnet|
next unless vnet[vnet.keys[0]].key?('template_id') ||
vnet[vnet.keys[0]].key?('reserve_from')
vnet_id = vnet[vnet.keys[0]]['id'].to_i
rc = OpenNebula::VirtualNetwork
.new_with_id(vnet_id, @client).delete
if OpenNebula.is_error?(rc)
vnets_failed << vnet_id
end
end
vnets_failed
end
def can_scale?
state == Service::STATE['RUNNING']
end
private
# Maximum number of log entries per service
# TODO: Make this value configurable
MAX_LOG = 50
def update_body(body)
@body = body
# Update @roles attribute with the new @body content
@roles = {}
if @body['roles']
@body['roles'].each do |elem|
elem['state'] ||= Role::STATE['PENDING']
role = Role.new(elem, self)
@roles[role.name] = role
end
end
# Update @xml attribute with the new body content
@xml.at_xpath('/DOCUMENT/TEMPLATE/BODY').children[0].content = @body
end
# @param [Logger::Severity] severity
# @param [String] message
def add_log(severity, message)
severity_str = Logger::SEV_LABEL[severity][0..0]
@body['log'] ||= []
@body['log'] ||= Array.new
@body['log'] << {
:timestamp => Time.now.to_i,
:severity => severity_str,
@ -597,82 +464,5 @@ module OpenNebula
# Truncate the number of log entries
@body['log'] = @body['log'].last(MAX_LOG)
end
def create_vnet(net)
extra = ''
extra = net[net.keys[0]]['extra'] if net[net.keys[0]].key? 'extra'
vntmpl_id = OpenNebula::VNTemplate
.new_with_id(net[net.keys[0]]['template_id']
.to_i, @client).instantiate(get_vnet_name(net), extra)
# TODO, check which error should be returned
return vntmpl_id if OpenNebula.is_error?(vntmpl_id)
net[net.keys[0]]['id'] = vntmpl_id
true
end
def reserve(net)
get_vnet_name(net)
extra = net[net.keys[0]]['extra'] if net[net.keys[0]].key? 'extra'
return false if extra.empty?
extra.concat("\nNAME=\"#{get_vnet_name(net)}\"\n")
reserve_id = OpenNebula::VirtualNetwork
.new_with_id(net[net.keys[0]]['reserve_from']
.to_i, @client).reserve_with_extra(extra)
return reserve_id if OpenNebula.is_error?(reserve_id)
net[net.keys[0]]['id'] = reserve_id
true
end
def get_vnet_name(net)
"#{net.keys[0]}-#{id}"
end
def resolve_attributes(template)
template['roles'].each do |role|
if role['vm_template_contents']
# $CUSTOM1_VAR Any word character (letter, number, underscore)
role['vm_template_contents'].scan(/\$(\w+)/).each do |key|
# Check if $ var value is in custom_attrs_values
if template['custom_attrs_values'].key?(key[0])
role['vm_template_contents'].gsub!(
'$'+key[0],
template['custom_attrs_values'][key[0]])
next
end
# Check if $ var value is in networks
net = template['networks_values']
.find {|att| att.key? key[0] }
next if net.nil?
role['vm_template_contents'].gsub!(
'$'+key[0],
net[net.keys[0]]['id'].to_s
)
end
end
next unless role['user_inputs_values']
role['vm_template_contents'] ||= ''
role['user_inputs_values'].each do |key, value|
role['vm_template_contents'] += "\n#{key}=\"#{value}\""
end
end
end
end
end

View File

@ -15,29 +15,12 @@
#--------------------------------------------------------------------------- #
module OpenNebula
# ServicePool class
class OpenNebulaServicePool < DocumentPoolJSON
class ServicePool < DocumentPoolJSON
DOCUMENT_TYPE = 100
def initialize(client, user_id = -1)
super(client, user_id)
end
def factory(element_xml)
service = OpenNebula::Service.new(element_xml, @client)
service.load_body
service
end
end
# ServicePool class
class ServicePool
@@mutex = Mutex.new
@@mutex_hash = {}
@@mutex_hash = Hash.new
# Class constructor
#
@ -46,38 +29,14 @@ module OpenNebula
# http://opennebula.org/documentation:rel3.6:api
#
# @return [DocumentPool] the new object
def initialize(cloud_auth, client)
# TODO, what if cloud_auth is nil?
@cloud_auth = cloud_auth
@client = client
@one_pool = nil
def initialize(client, user_id=-1)
super(client, user_id)
end
def client
# If there's a client defined use it
return @client unless @client.nil?
# If not, get one via cloud_auth
@cloud_auth.client
end
def info
osp = OpenNebulaServicePool.new(client)
rc = osp.info
@one_pool = osp
rc
end
def to_json
@one_pool.to_json
end
def each(&block)
return if @one_pool.nil?
@one_pool.each(&block)
def factory(element_xml)
service = OpenNebula::Service.new(element_xml, @client)
service.load_body
service
end
# Retrieves a Service element from OpenNebula. The Service::info()
@ -88,66 +47,51 @@ module OpenNebula
# The mutex will be unlocked after the block execution.
#
# @return [Service, OpenNebula::Error] The Service in case of success
def get(service_id, external_client = nil, &block)
def get(service_id, &block)
service_id = service_id.to_i if service_id
aux_client = nil
service = Service.new_with_id(service_id, @client)
if external_client.nil?
aux_client = client
rc = service.info
if OpenNebula.is_error?(rc)
return rc
else
aux_client = external_client
end
if block_given?
obj_mutex = nil
entry = nil
service = Service.new_with_id(service_id, aux_client)
@@mutex.synchronize {
# entry is an array of [Mutex, waiting]
# waiting is the number of threads waiting on this mutex
entry = @@mutex_hash[service_id]
if block_given?
obj_mutex = nil
entry = nil
@@mutex.synchronize do
# entry is an array of [Mutex, waiting]
# waiting is the number of threads waiting on this mutex
entry = @@mutex_hash[service_id]
if entry.nil?
entry = [Mutex.new, 0]
@@mutex_hash[service_id] = entry
end
obj_mutex = entry[0]
entry[1] = entry[1] + 1
if @@mutex_hash.size > 10000
@@mutex_hash.delete_if do |_s_id, entry_loop|
entry_loop[1] == 0
if entry.nil?
entry = [Mutex.new, 0]
@@mutex_hash[service_id] = entry
end
end
obj_mutex = entry[0]
entry[1] = entry[1] + 1
if @@mutex_hash.size > 10000
@@mutex_hash.delete_if { |s_id, entry|
entry[1] == 0
}
end
}
obj_mutex.synchronize {
block.call(service)
}
@@mutex.synchronize {
entry[1] = entry[1] - 1
}
end
rc = obj_mutex.synchronize do
rc = service.info
if OpenNebula.is_error?(rc)
return rc
end
block.call(service)
end
@@mutex.synchronize do
entry[1] = entry[1] - 1
end
if OpenNebula.is_error?(rc)
return rc
end
else
service.info
return service
end
service
end
end
end

View File

@ -152,13 +152,9 @@ module OpenNebula
:required => true
},
'deployment' => {
:type => :string,
:enum => %w{none straight},
:default => 'none'
},
'description' => {
:type => :string,
:default => ''
:type => :string,
:enum => %w{none straight},
:default => 'none'
},
'shutdown_action' => {
:type => :string,
@ -172,37 +168,18 @@ module OpenNebula
},
'custom_attrs' => {
:type => :object,
:properties => { },
:required => false
},
'custom_attrs_values' => {
:type => :object,
:properties => { },
:properties => {
},
:required => false
},
'ready_status_gate' => {
:type => :boolean,
:required => false
},
'networks' => {
:type => :object,
:properties => { },
:required => false
},
'networks_values' => {
:type => :array,
:items => {
:type => :object,
:properties => { }
},
:required => false
}
}
}
def self.init_default_vn_name_template(vn_name_template)
@@vn_name_template = vn_name_template
end
DOCUMENT_TYPE = 101
@ -235,7 +212,9 @@ module OpenNebula
if append
rc = info
return rc if OpenNebula.is_error?(rc)
if OpenNebula.is_error? rc
return rc
end
template = @body.merge(template)
end
@ -269,32 +248,7 @@ module OpenNebula
validate_values(template)
end
def instantiate(merge_template)
rc = nil
if merge_template.nil?
instantiate_template = JSON.parse(@body.to_json)
else
instantiate_template = JSON.parse(@body.to_json)
.merge(merge_template)
end
begin
ServiceTemplate.validate(instantiate_template)
xml = OpenNebula::Service.build_xml
service = OpenNebula::Service.new(xml, @client)
rc = service.allocate(instantiate_template.to_json)
rescue Validator::ParseException, JSON::ParserError => e
return e
end
return rc if OpenNebula.is_error?(rc)
service.info
service
end
private
def self.validate_values(template)
parser = ElasticityGrammarParser.new

View File

@ -16,22 +16,216 @@
require 'strategy/straight'
# Strategy class (module none?)
module Strategy
class Strategy
LOG_COMP = 'STR'
LOG_COMP = "STR"
# Performs a boot step, deploying all nodes that meet the requirements
# @param [Service] service service to boot
# @return [Array<true, nil>, Array<false, String>] true if all the nodes
# were created, false and the error reason if there was a problem
# creating the VMs
def boot_step(service)
Log.debug LOG_COMP, "Boot step", service.id()
roles_deploy = get_roles_deploy(service)
roles_deploy.each { |name, role|
Log.debug LOG_COMP, "Deploying role #{name}", service.id()
rc = role.deploy
if !rc[0]
role.set_state(Role::STATE['FAILED_DEPLOYING'])
return rc
else
role.set_state(Role::STATE['DEPLOYING'])
end
}
return [true, nil]
end
# Performs a shutdown step, shutting down all nodes that meet the requirements
# @param [Service] service service to boot
# @return [Array<true, nil>, Array<false, String>] true if all the nodes
# were created, false and the error reason if there was a problem
# creating the VMs
def shutdown_step(service)
Log.debug LOG_COMP, "Shutdown step", service.id()
roles_shutdown = get_roles_shutdown(service)
roles_shutdown.each { |name, role|
Log.debug LOG_COMP, "Shutting down role #{name}", service.id()
rc = role.shutdown
if !rc[0]
role.set_state(Role::STATE['FAILED_UNDEPLOYING'])
return rc
else
role.set_state(Role::STATE['UNDEPLOYING'])
end
}
return [true, nil]
end
# If a role needs to scale, its cardinality is updated, and its state is set
# to SCALING. Only one role is set to scale.
# @param [Service] service
# @return [true|false] true if any role needs to scale
def apply_scaling_policies(service)
Log.debug LOG_COMP, "Apply scaling policies", service.id()
service.get_roles.each do |name, role|
diff, cooldown_duration = role.scale?
if diff != 0
Log.debug LOG_COMP, "Role #{name} needs to scale #{diff} nodes", service.id()
role.set_cardinality(role.cardinality() + diff)
role.set_state(Role::STATE['SCALING'])
role.set_cooldown_duration(cooldown_duration)
return true
end
end
return false
end
# If a role is scaling, the nodes are created/destroyed to match the current
# cardinality
# @return [Array<true, nil>, Array<false, String>] true if the action was
# performed, false and the error reason if there was a problem
def scale_step(service)
Log.debug LOG_COMP, "Scale step", service.id()
service.get_roles.each do |name, role|
if role.state == Role::STATE['SCALING']
rc = role.scale()
if !rc[0]
role.set_state(Role::STATE['FAILED_SCALING'])
return rc
end
end
end
return [true, nil]
end
# Performs a monitor step, check if the roles already deployed are running
# @param [Service] service service to monitor
# @return [nil]
def monitor_step(service)
Log.debug LOG_COMP, "Monitor step", service.id()
roles_monitor = get_roles_monitor(service)
roles_monitor.each { |name, role|
Log.debug LOG_COMP, "Monitoring role #{name}", service.id()
rc = role.info
case role.state()
when Role::STATE['RUNNING']
if OpenNebula.is_error?(rc) || role_nodes_warning?(role)
role.set_state(Role::STATE['WARNING'])
end
role.update_cardinality()
when Role::STATE['WARNING']
if !OpenNebula.is_error?(rc) && !role_nodes_warning?(role)
role.set_state(Role::STATE['RUNNING'])
end
role.update_cardinality()
when Role::STATE['DEPLOYING']
if OpenNebula.is_error?(rc)
role.set_state(Role::STATE['FAILED_DEPLOYING'])
elsif role_nodes_running?(role)
role.set_state(Role::STATE['RUNNING'])
elsif any_node_failed?(role)
role.set_state(Role::STATE['FAILED_DEPLOYING'])
end
when Role::STATE['SCALING']
if OpenNebula.is_error?(rc)
role.set_state(Role::STATE['FAILED_SCALING'])
elsif role_finished_scaling?(role)
if role.apply_cooldown_duration()
role.set_state(Role::STATE['COOLDOWN'])
else
role.set_state(Role::STATE['RUNNING'])
end
elsif any_node_failed_scaling?(role)
role.set_state(Role::STATE['FAILED_SCALING'])
end
when Role::STATE['COOLDOWN']
if role.cooldown_over?
role.set_state(Role::STATE['RUNNING'])
end
role.update_cardinality()
when Role::STATE['UNDEPLOYING']
if OpenNebula.is_error?(rc)
role.set_state(Role::STATE['FAILED_UNDEPLOYING'])
elsif role_nodes_done?(role)
role.set_state(Role::STATE['DONE'])
elsif any_node_failed?(role)
role.set_state(Role::STATE['FAILED_UNDEPLOYING'])
end
when Role::STATE['FAILED_DEPLOYING']
if !OpenNebula.is_error?(rc) && role_nodes_running?(role)
role.set_state(Role::STATE['RUNNING'])
end
when Role::STATE['FAILED_UNDEPLOYING']
if !OpenNebula.is_error?(rc) && role_nodes_done?(role)
role.set_state(Role::STATE['DONE'])
end
when Role::STATE['FAILED_SCALING']
if !OpenNebula.is_error?(rc) && role_finished_scaling?(role)
role.set_state(Role::STATE['SCALING'])
end
end
}
end
protected
# All subclasses must define these methods
# Returns all node Roles ready to be deployed
# @param [Service] service
# @return [Hash<String, Role>] Roles
def roles_deploy
result = roles.select do |_name, role|
def get_roles_deploy(service)
result = service.get_roles.select {|name, role|
role.state == Role::STATE['PENDING'] ||
role.state == Role::STATE['SCALING']
role.state == Role::STATE['DEPLOYING']
}
# Ruby 1.8 compatibility
if result.instance_of?(Array)
result = Hash[result]
end
result
end
# Returns all node Roles be monitored
# @param [Service] service
# @return [Hash<String, Role>] Roles
def get_roles_monitor(service)
result = service.get_roles.select {|name, role|
![Role::STATE['PENDING'], Role::STATE['DONE']].include?(role.state)
}
# Ruby 1.8 compatibility
if result.instance_of?(Array)
result = Hash[result]
@ -43,12 +237,12 @@ module Strategy
# Returns all node Roles ready to be shutdown
# @param [Service] service
# @return [Hash<String, Role>] Roles
def roles_shutdown
result = roles.reject do |_name, role|
[Role::STATE['UNDEPLOYING'],
Role::STATE['DONE'],
Role::STATE['FAILED_UNDEPLOYING']].include?(role.state)
end
def get_roles_shutdown(service)
result = service.get_roles.select {|name, role|
![Role::STATE['UNDEPLOYING'],
Role::STATE['DONE'],
Role::STATE['FAILED_UNDEPLOYING']].include?(role.state)
}
# Ruby 1.8 compatibility
if result.instance_of?(Array)
@ -58,4 +252,114 @@ module Strategy
result
end
# Determine if the role nodes are running
# @param [Role] role
# @return [true|false]
def role_nodes_running?(role)
if role.get_nodes.size() != role.cardinality()
return false
end
role.get_nodes.each { |node|
return false if !(node && node['running'])
}
return true
end
# Returns true if any VM is in UNKNOWN or FAILED
# @param [Role] role
# @return [true|false]
def role_nodes_warning?(role)
role.get_nodes.each do |node|
if node && node['vm_info']
vm_state = node['vm_info']['VM']['STATE']
lcm_state = node['vm_info']['VM']['LCM_STATE']
# Failure or UNKNOWN
if vm_failure?(node) || (vm_state == '3' && lcm_state == '16')
return true
end
end
end
return false
end
# Determine if any of the role nodes failed
# @param [Role] role
# @return [true|false]
def any_node_failed?(role)
role.get_nodes.each { |node|
if vm_failure?(node)
return true
end
}
return false
end
# Determine if the role nodes are in done state
# @param [Role] role
# @return [true|false]
def role_nodes_done?(role)
role.get_nodes.each { |node|
if node && node['vm_info']
vm_state = node['vm_info']['VM']['STATE']
if vm_state != '6' # DONE
return false
end
else
return false
end
}
return true
end
# Determine if any of the role nodes failed to scale
# @param [Role] role
# @return [true|false]
def any_node_failed_scaling?(role)
role.get_nodes.each { |node|
if node && node['vm_info'] &&
(node['disposed'] == '1' || node['scale_up'] == '1') &&
vm_failure?(node)
return true
end
}
return false
end
def role_finished_scaling?(role)
role.get_nodes.each { |node|
# For scale up, check new nodes are running, or past running
if node
if node['scale_up'] == '1'
return false if !node['running']
end
else
return false
end
}
# TODO: If a shutdown ends in running again (VM doesn't have acpi),
# the role/service will stay in SCALING
# For scale down, it will finish when scaling nodes are deleted
return role.get_nodes.size() == role.cardinality()
end
def vm_failure?(node)
if node && node['vm_info']
return Role.vm_failure?(
vm_state = node['vm_info']['VM']['STATE'],
lcm_state = node['vm_info']['VM']['LCM_STATE'])
end
return false
end
end

View File

@ -14,13 +14,11 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Straight strategy module
module Straight
# Using this strategy the service is deployed based on a directed
# acyclic graph where each node defines its parents.
#
# For example:
# For example:
#
# mysql nfs
# | | \
@ -68,34 +66,41 @@ module Straight
# 2. kvm & myslq
# 3. nfs
# Returns all node Roles ready to be deployed
# @param [Service] service
# @return [Hash<String, Role>] Roles
def roles_deploy
running_roles = roles.select do |_name, role|
def get_roles_deploy(service)
roles = service.get_roles
running_roles = roles.select {|name, role|
role.state == Role::STATE['RUNNING']
end
}
# Ruby 1.8 compatibility
if running_roles.instance_of?(Array)
running_roles = Hash[running_roles]
end
result = roles.select do |_name, role|
result = roles.select {|name, role|
check = true
if role.state == Role::STATE['PENDING']
role.parents.each do |parent|
role.parents.each { |parent|
if !running_roles.include?(parent)
check = false
break
end
end
}
elsif role.state == Role::STATE['DEPLOYING']
check = true
else
check = false
end
check
end
}
# Ruby 1.8 compatibility
if result.instance_of?(Array)
@ -106,31 +111,34 @@ module Straight
end
# Returns all node Roles ready to be shutdown
# @param [Service] service
# @return [Hash<String, Role>] Roles
def roles_shutdown
def get_roles_shutdown(service)
roles = service.get_roles
# Get all the parents from running roles
parents = []
running_roles = {}
roles.each do |name, role|
roles.each { |name, role|
# All roles can be shutdown, except the ones in these states
if ![Role::STATE['UNDEPLOYING'],
Role::STATE['DONE'],
Role::STATE['FAILED_UNDEPLOYING']].include?(role.state)
if (![Role::STATE['UNDEPLOYING'],
Role::STATE['DONE'],
Role::STATE['FAILED_UNDEPLOYING']].include?(role.state) )
running_roles[name]= role
end
# Only the parents of DONE roles can be shutdown
if role.state != Role::STATE['DONE']
if (role.state != Role::STATE['DONE'] )
parents += role.parents
end
end
}
# Select the nodes that are not parent from any node
result = running_roles.reject do |name, _role|
parents.include?(name)
end
result = running_roles.select {|name, role|
!parents.include?(name)
}
# Ruby 1.8 compatibility
if result.instance_of?(Array)
@ -140,4 +148,4 @@ module Straight
result
end
end
end

View File

@ -1,4 +1,3 @@
# rubocop:disable Naming/FileName
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
@ -55,9 +54,6 @@ require 'CloudServer'
require 'models'
require 'log'
require 'LifeCycleManager'
require 'EventManager'
DEFAULT_VM_NAME_TEMPLATE = '$ROLE_NAME_$VM_NUMBER_(service_$SERVICE_ID)'
##############################################################################
@ -66,28 +62,26 @@ DEFAULT_VM_NAME_TEMPLATE = '$ROLE_NAME_$VM_NUMBER_(service_$SERVICE_ID)'
begin
conf = YAML.load_file(CONFIGURATION_FILE)
rescue StandardError => e
rescue Exception => e
STDERR.puts "Error parsing config file #{CONFIGURATION_FILE}: #{e.message}"
exit 1
end
conf[:debug_level] ||= 2
conf[:lcm_interval] ||= 30
conf[:debug_level] ||= 2
conf[:lcm_interval] ||= 30
conf[:default_cooldown] ||= 300
conf[:shutdown_action] ||= 'terminate'
conf[:action_number] ||= 1
conf[:action_period] ||= 60
conf[:vm_name_template] ||= DEFAULT_VM_NAME_TEMPLATE
conf[:auth] = 'opennebula'
conf[:shutdown_action] ||= 'terminate'
conf[:action_number] ||= 1
conf[:action_period] ||= 60
conf[:auth] = 'opennebula'
set :bind, conf[:host]
set :port, conf[:port]
set :config, conf
# rubocop:disable Style/MixinUsage
include CloudLogger
# rubocop:enable Style/MixinUsage
logger = enable_logging ONEFLOW_LOG, conf[:debug_level].to_i
use Rack::Session::Pool, :key => 'oneflow'
@ -95,18 +89,19 @@ use Rack::Session::Pool, :key => 'oneflow'
Log.logger = logger
Log.level = conf[:debug_level].to_i
LOG_COMP = 'ONEFLOW'
Log.info LOG_COMP, 'Starting server'
LOG_COMP = "ONEFLOW"
Log.info LOG_COMP, "Starting server"
begin
ENV['ONE_CIPHER_AUTH'] = ONEFLOW_AUTH
ENV["ONE_CIPHER_AUTH"] = ONEFLOW_AUTH
cloud_auth = CloudAuth.new(conf)
rescue StandardError => e
rescue => e
message = "Error initializing authentication system : #{e.message}"
Log.error LOG_COMP, message
STDERR.puts message
exit(-1)
exit -1
end
set :cloud_auth, cloud_auth
@ -115,43 +110,16 @@ set :cloud_auth, cloud_auth
# Helpers
##############################################################################
before do
auth = Rack::Auth::Basic::Request.new(request.env)
if auth.provided? && auth.basic?
username, password = auth.credentials
@client = OpenNebula::Client.new("#{username}:#{password}",
conf[:one_xmlrpc])
@client = OpenNebula::Client.new("#{username}:#{password}", conf[:one_xmlrpc])
else
error 401, 'A username and password must be provided'
end
end
# Set status error and return the error msg
#
# @param error_msg [String] Error message
# @param error_code [Integer] Http error code
def internal_error(error_msg, error_code)
status error_code
body error_msg
end
# Get HTTP error code based on OpenNebula eror code
#
# @param error [Integer] OpenNebula error code
def one_error_to_http(error)
case error
when OpenNebula::Error::ESUCCESS
200
when OpenNebula::Error::EAUTHORIZATION
401
when OpenNebula::Error::EAUTHENTICATION
403
when OpenNebula::Error::ENO_EXISTS
404
else
500
error 401, "A username and password must be provided"
end
end
@ -162,36 +130,32 @@ end
Role.init_default_cooldown(conf[:default_cooldown])
Role.init_default_shutdown(conf[:shutdown_action])
Role.init_force_deletion(conf[:force_deletion])
conf[:vm_name_template] ||= DEFAULT_VM_NAME_TEMPLATE
Role.init_default_vm_name_template(conf[:vm_name_template])
ServiceTemplate.init_default_vn_name_template(conf[:vn_name_template])
##############################################################################
# HTTP error codes
# LCM thread
##############################################################################
VALIDATION_EC = 400 # bad request by the client
OPERATION_EC = 405 # operation not allowed (e.g: in current state)
GENERAL_EC = 500 # general error
t = Thread.new {
require 'LifeCycleManager'
##############################################################################
# LCM and Event Manager
##############################################################################
ServiceLCM.new(conf[:lcm_interval], cloud_auth).loop
}
t.abort_on_exception = true
# TODO: make thread number configurable?
lcm = ServiceLCM.new(@client, 10, cloud_auth)
##############################################################################
# Service
##############################################################################
get '/service' do
# Read-only object
service_pool = OpenNebula::ServicePool.new(nil, @client)
service_pool = OpenNebula::ServicePool.new(@client, OpenNebula::Pool::INFO_ALL)
rc = service_pool.info
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
status 200
@ -200,11 +164,12 @@ get '/service' do
end
get '/service/:id' do
service = Service.new_with_id(params[:id], @client)
service_pool = OpenNebula::ServicePool.new(@client)
rc = service.info
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
service = service_pool.get(params[:id])
if OpenNebula.is_error?(service)
error CloudServer::HTTP_ERROR_CODE[service.errno], service.message
end
status 200
@ -213,157 +178,174 @@ get '/service/:id' do
end
delete '/service/:id' do
# Read-only object
service = OpenNebula::Service.new_with_id(params[:id], @client)
service_pool = OpenNebula::ServicePool.new(@client)
rc = service.info
if OpenNebula.is_error?(rc)
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
rc = nil
service = service_pool.get(params[:id]) { |service|
rc = service.delete
}
if OpenNebula.is_error?(service)
error CloudServer::HTTP_ERROR_CODE[service.errno], service.message
end
# Starts service undeploying async
rc = lcm.undeploy_action(@client, service.id)
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
status 204
end
post '/service/:id/action' do
service_pool = OpenNebula::ServicePool.new(@client)
action = JSON.parse(request.body.read)['action']
opts = action['params']
case action['perform']
when 'recover'
rc = lcm.recover_action(@client, params[:id])
when 'chown'
if opts && opts['owner_id']
u_id = opts['owner_id'].to_i
g_id = (opts['group_id'] || -1).to_i
rc = nil
service = service_pool.get(params[:id]) { |service|
rc = case action['perform']
when 'shutdown'
service.shutdown
when 'recover', 'deploy'
service.recover
when 'chown'
if opts && opts['owner_id']
args = Array.new
args << opts['owner_id'].to_i
args << (opts['group_id'] || -1).to_i
rc = lcm.chown_action(@client, params[:id], u_id, g_id)
else
rc = OpenNebula::Error.new("Action #{action['perform']}: " \
'You have to specify a UID')
end
when 'chgrp'
if opts && opts['group_id']
g_id = opts['group_id'].to_i
ret = service.chown(*args)
rc = lcm.chown_action(@client, params[:id], -1, g_id)
if !OpenNebula.is_error?(ret)
Log.info(LOG_COMP, "Service owner changed to #{args[0]}:#{args[1]}", params[:id])
end
ret
else
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to specify a UID")
end
when 'chgrp'
if opts && opts['group_id']
ret = service.chown(-1, opts['group_id'].to_i)
if !OpenNebula.is_error?(ret)
Log.info(LOG_COMP, "Service group changed to #{opts['group_id']}", params[:id])
end
ret
else
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to specify a GID")
end
when 'chmod'
if opts && opts['octet']
ret = service.chmod_octet(opts['octet'])
if !OpenNebula.is_error?(ret)
Log.info(LOG_COMP, "Service permissions changed to #{opts['octet']}", params[:id])
end
ret
else
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to specify an OCTET")
end
when 'rename'
service.rename(opts['name'])
when 'update'
if opts && opts['append']
if opts['template_json']
begin
rc = service.update(opts['template_json'], true)
status 204
rescue Validator::ParseException, JSON::ParserError
OpenNebula::Error.new($!.message)
end
elsif opts['template_raw']
rc = service.update_raw(opts['template_raw'], true)
status 204
else
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to provide a template")
end
else
OpenNebula::Error.new("Action #{action['perform']}: " <<
"Only supported for append")
end
else
rc = OpenNebula::Error.new("Action #{action['perform']}: " \
'You have to specify a GID')
OpenNebula::Error.new("Action #{action['perform']} not supported")
end
when 'chmod'
if opts && opts['octet']
rc = lcm.chmod_action(@client, params[:id], opts['octet'])
else
rc = OpenNebula::Error.new("Action #{action['perform']}: " \
'You have to specify an OCTET')
end
when 'rename'
if opts && opts['name']
rc = lcm.rename_action(@client, params[:id], opts['name'])
else
rc = OpenNebula::Error.new("Action #{action['perform']}: " \
'You have to specify a name')
end
# when 'update'
# if opts && opts['append']
# if opts['template_json']
# begin
# service.update(opts['template_json'], true)
# status 204
# rescue Validator::ParseException, JSON::ParserError => e
# OpenNebula::Error.new(e.message)
# end
# elsif opts['template_raw']
# service.update_raw(opts['template_raw'], true)
# status 204
# else
# OpenNebula::Error.new("Action #{action['perform']}: " \
# 'You have to provide a template')
# end
# else
# OpenNebula::Error.new("Action #{action['perform']}: " \
# 'Only supported for append')
# end
else
rc = OpenNebula::Error.new("Action #{action['perform']} not supported")
}
if OpenNebula.is_error?(service)
error CloudServer::HTTP_ERROR_CODE[service.errno], service.message
end
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
status 204
end
# put '/service/:id/role/:name' do
# service_pool = nil # OpenNebula::ServicePool.new(@client)
#
# rc = nil
# service_rc = service_pool.get(params[:id]) do |service|
# begin
# rc = service.update_role(params[:name], request.body.read)
# rescue Validator::ParseException, JSON::ParserError => e
# return internal_error(e.message, VALIDATION_EC)
# end
# end
#
# if OpenNebula.is_error?(service_rc)
# error CloudServer::HTTP_ERROR_CODE[service_rc.errno], service_rc.message
# end
#
# if OpenNebula.is_error?(rc)
# error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
# end
#
# status 204
# end
put '/service/:id/role/:name' do
service_pool = OpenNebula::ServicePool.new(@client)
rc = nil
service = service_pool.get(params[:id]) do |service|
begin
rc = service.update_role(params[:name], request.body.read)
rescue Validator::ParseException, JSON::ParserError
return error 400, $!.message
end
end
if OpenNebula.is_error?(service)
error CloudServer::HTTP_ERROR_CODE[service.errno], service.message
end
if OpenNebula.is_error?(rc)
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
status 204
end
post '/service/:id/role/:role_name/action' do
service_pool = OpenNebula::ServicePool.new(@client)
action = JSON.parse(request.body.read)['action']
opts = action['params']
# Use defaults only if one of the options is supplied
if opts['period'].nil? && opts['number'].nil?
opts['period'] = conf[:action_period] if opts['period'].nil?
opts['number'] = conf[:action_number] if opts['number'].nil?
rc = nil
service = service_pool.get(params[:id]) { |service|
roles = service.get_roles
role = roles[params[:role_name]]
if role.nil?
rc = OpenNebula::Error.new("Role '#{params[:role_name]}' not found")
else
# Use defaults only if one of the options is supplied
if opts['period'].nil? ^ opts['number'].nil?
opts['period'] = conf[:action_period] if opts['period'].nil?
opts['number'] = conf[:action_number] if opts['number'].nil?
end
rc = role.batch_action(action['perform'], opts['period'], opts['number'])
end
}
if OpenNebula.is_error?(service)
error CloudServer::HTTP_ERROR_CODE[service.errno], service.message
end
rc = lcm.sched_action(@client,
params[:id],
params[:role_name],
action['perform'],
opts['period'],
opts['number'])
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
status 201
end
post '/service/:id/scale' do
call_body = JSON.parse(request.body.read)
rc = lcm.scale_action(@client,
params[:id],
call_body['role_name'],
call_body['cardinality'].to_i,
call_body['force'])
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
end
status 201
body
body rc.to_json
end
##############################################################################
@ -371,8 +353,7 @@ end
##############################################################################
get '/service_template' do
s_template_pool = OpenNebula::ServiceTemplatePool
.new(@client, OpenNebula::Pool::INFO_ALL)
s_template_pool = OpenNebula::ServiceTemplatePool.new(@client, OpenNebula::Pool::INFO_ALL)
rc = s_template_pool.info
if OpenNebula.is_error?(rc)
@ -385,8 +366,7 @@ get '/service_template' do
end
get '/service_template/:id' do
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id],
@client)
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id], @client)
rc = service_template.info
if OpenNebula.is_error?(rc)
@ -399,8 +379,7 @@ get '/service_template/:id' do
end
delete '/service_template/:id' do
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id],
@client)
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id], @client)
rc = service_template.delete
if OpenNebula.is_error?(rc)
@ -411,13 +390,12 @@ delete '/service_template/:id' do
end
put '/service_template/:id' do
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id],
@client)
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id], @client)
begin
rc = service_template.update(request.body.read)
rescue Validator::ParseException, JSON::ParserError => e
return internal_error(e.message, VALIDATION_EC)
rescue Validator::ParseException, JSON::ParserError
error 400, $!.message
end
if OpenNebula.is_error?(rc)
@ -427,18 +405,18 @@ put '/service_template/:id' do
service_template.info
status 200
body service_template.to_json
end
post '/service_template' do
xml = OpenNebula::ServiceTemplate.build_xml
s_template = OpenNebula::ServiceTemplate.new(xml, @client)
s_template = OpenNebula::ServiceTemplate.new(
OpenNebula::ServiceTemplate.build_xml,
@client)
begin
rc = s_template.allocate(request.body.read)
rescue Validator::ParseException, JSON::ParserError => e
return internal_error(e.message, VALIDATION_EC)
rescue Validator::ParseException, JSON::ParserError
error 400, $!.message
end
if OpenNebula.is_error?(rc)
@ -448,142 +426,122 @@ post '/service_template' do
s_template.info
status 201
# body Parser.render(rc)
#body Parser.render(rc)
body s_template.to_json
end
post '/service_template/:id/action' do
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id],
@client)
service_template = OpenNebula::ServiceTemplate.new_with_id(params[:id], @client)
action = JSON.parse(request.body.read)['action']
opts = action['params']
opts = {} if opts.nil?
# rubocop:disable Style/ConditionalAssignment
case action['perform']
rc = case action['perform']
when 'instantiate'
rc = service_template.info
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
merge_template = opts['merge_template']
service_json = JSON.parse(service_template.to_json)
# Check custom_attrs
body = service_json['DOCUMENT']['TEMPLATE']['BODY']
custom_attrs = body['custom_attrs']
if !merge_template.nil?
begin
orig_template = JSON.parse(service_template.template)
if merge_template
custom_attrs_values = merge_template['custom_attrs_values']
end
instantiate_template = orig_template.merge(merge_template)
if custom_attrs && !(custom_attrs.is_a? Hash)
return internal_error('Wrong custom_attrs format',
VALIDATION_EC)
end
ServiceTemplate.validate(instantiate_template)
if custom_attrs_values && !(custom_attrs_values.is_a? Hash)
return internal_error('Wrong custom_attrs_values format',
VALIDATION_EC)
end
instantiate_template["roles"].each { |role|
if role["vm_template_contents"]
# $CUSTOM1_VAR Any word character (letter, number, underscore)
role["vm_template_contents"].scan(/\$(\w+)/).each { |key|
if instantiate_template["custom_attrs_values"].has_key?(key[0])
role["vm_template_contents"].gsub!(
"$"+key[0],
instantiate_template["custom_attrs_values"][key[0]])
end
}
end
if custom_attrs &&
custom_attrs_values &&
!(custom_attrs.keys - custom_attrs_values.keys).empty?
return internal_error('Every custom_attrs key must have its ' \
'value defined at custom_attrs_value',
VALIDATION_EC)
end
if role["user_inputs_values"]
role["vm_template_contents"] ||= ""
role["user_inputs_values"].each{ |key, value|
role["vm_template_contents"] += "\n#{key}=\"#{value}\""
}
end
}
# Check networks
networks = body['networks']
networks_values = merge_template['networks_values'] if merge_template
instantiate_template_json = instantiate_template.to_json
if networks && !(networks.is_a? Hash)
return internal_error('Wrong networks format', VALIDATION_EC)
end
if networks_values && networks_values.find {|v| !v.is_a? Hash }
return internal_error('Wrong networks_values format', VALIDATION_EC)
end
if networks && networks_values && !(networks.keys -
networks_values.collect {|i| i.keys }.flatten).empty?
return internal_error('Every network key must have its value ' \
'defined at networks_value', VALIDATION_EC)
end
# Creates service document
service = service_template.instantiate(merge_template)
if OpenNebula.is_error?(service)
return internal_error(service.message,
one_error_to_http(service.errno))
elsif service.is_a? StandardError
# there was a JSON validation error
return internal_error(service.message, GENERAL_EC)
else
# Starts service deployment async
rc = lcm.deploy_action(@client, service.id)
if OpenNebula.is_error?(rc)
return internal_error(rc.message, one_error_to_http(rc.errno))
rescue Validator::ParseException, JSON::ParserError
error 400, $!.message
end
service_json = service.nil? ? '' : service.to_json
status 201
body service_json
else
instantiate_template_json = service_template.template
end
service = OpenNebula::Service.new(OpenNebula::Service.build_xml, @client)
rc = service.allocate(instantiate_template_json)
if OpenNebula.is_error?(rc)
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
service.info
status 201
body service.to_json
when 'chown'
if opts && opts['owner_id']
args = []
args = Array.new
args << opts['owner_id'].to_i
args << (opts['group_id'].to_i || -1)
status 204
service_template.chown(*args)
else
OpenNebula::Error.new("Action #{action['perform']}: "\
'You have to specify a UID')
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to specify a UID")
end
when 'chgrp'
if opts && opts['group_id']
status 204
service_template.chown(-1, opts['group_id'].to_i)
else
OpenNebula::Error.new("Action #{action['perform']}: "\
'You have to specify a GID')
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to specify a GID")
end
when 'chmod'
if opts && opts['octet']
status 204
service_template.chmod_octet(opts['octet'])
else
OpenNebula::Error.new("Action #{action['perform']}: "\
'You have to specify an OCTET')
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to specify an OCTET")
end
when 'update'
append = opts['append'] == true
if opts && opts['template_json']
begin
service_template.update(opts['template_json'], append)
rc = service_template.update(
opts['template_json'],
(opts['append'] == true))
status 204
rescue Validator::ParseException, JSON::ParserError => e
return internal_error(e.message, VALIDATION_EC)
rescue Validator::ParseException, JSON::ParserError
OpenNebula::Error.new($!.message)
end
elsif opts && opts['template_raw']
service_template.update_raw(opts['template_raw'], append)
rc = service_template.update_raw(
opts['template_raw'],
(opts['append'] == true))
status 204
else
OpenNebula::Error.new("Action #{action['perform']}: "\
'You have to provide a template')
OpenNebula::Error.new("Action #{action['perform']}: " <<
"You have to provide a template")
end
when 'rename'
status 204
@ -597,8 +555,7 @@ post '/service_template/:id/action' do
new_stemplate = OpenNebula::ServiceTemplate.new_with_id(rc, @client)
new_stemplate.info
if OpenNebula.is_error?(new_stemplate)
error CloudServer::HTTP_ERROR_CODE[new_stemplate.errno],
new_stemplate.message
error CloudServer::HTTP_ERROR_CODE[new_stemplate.errno], new_stemplate.message
end
status 201
@ -606,10 +563,8 @@ post '/service_template/:id/action' do
else
OpenNebula::Error.new("Action #{action['perform']} not supported")
end
# rubocop:enable Style/ConditionalAssignment
if OpenNebula.is_error?(rc)
error CloudServer::HTTP_ERROR_CODE[rc.errno], rc.message
end
end
# rubocop:enable Naming/FileName

View File

@ -285,7 +285,7 @@ module Service
exit_code = 0
ids.each do |id|
response = block.call(id) if block_given?
response = block.call(id)
if CloudClient::is_error?(response)
puts response.to_s
@ -296,22 +296,6 @@ module Service
exit_code
end
# Perform an action on a resource
# @param [Integer] id resource id
# @param [Block] block action to be performed
# @return [Integer] exit_code
def self.perform_action(id, &block)
exit_code = 0
response = block.call(id) if block_given?
if CloudClient::is_error?(response)
puts response.to_s
exit_code = response.code.to_i
end
exit_code
end
class Client
def initialize(opts={})
@username = opts[:username] || ENV['ONEFLOW_USER']

View File

@ -252,12 +252,6 @@ module OpenNebula
return @client.call(VN_METHODS[:reserve], @pe_id, rtmpl)
end
def reserve_with_extra(extra)
return Error.new('ID not defined') unless @pe_id
@client.call(VN_METHODS[:reserve], @pe_id, extra)
end
# Removes an Address Range from the VirtualNetwork
def free(ar_id)
return Error.new('ID not defined') if !@pe_id

View File

@ -200,18 +200,3 @@
- vcenter
- support
- nsx
# this display button and clock icon in table of vm
:leases:
suspense:
time: "+1209600"
color: "#000000"
warning:
time: "-86400"
color: "#085aef"
terminate:
time: "+1209600"
color: "#e1ef08"
warning:
time: "-86400"
color: "#ef2808"

View File

@ -74,8 +74,9 @@ define(function(require) {
$(".instantiate_wrapper", context).hide();
this.templatesTable.idInput().off("change").on("change", function(){
this.templatesTable.idInput().on("change", function(){
$(".instantiate_wrapper", context).show();
var template_id = $(this).val();
that.setTemplateId(context, template_id);
});

View File

@ -113,16 +113,16 @@ define(function(require) {
$(".name", context).text(template_json.DOCUMENT.NAME);
$("#instantiate_service_user_inputs", context).empty();
UserInputs.serviceTemplateInsert(
$("#instantiate_service_user_inputs", context),
template_json, {
select_networks: true
});
template_json);
n_roles = template_json.DOCUMENT.TEMPLATE.BODY.roles.length;
n_roles_done = 0;
var total_cost = 0;
$.each(template_json.DOCUMENT.TEMPLATE.BODY.roles, function(index, role){
var div_id = "user_input_role_"+index;
@ -140,31 +140,30 @@ define(function(require) {
success: function (request, vm_template_json){
that.vm_template_json = vm_template_json;
$("#"+div_id, context).empty();
//if (role.vm_template_contents){
if (role.vm_template_contents){
roleTemplate = TemplateUtils.stringToTemplate(role.vm_template_contents);
if(roleTemplate && roleTemplate.APPEND){
var append = roleTemplate.APPEND.split(",");
$.each(append, function(key, value){
if (!that.vm_template_json.VMTEMPLATE.TEMPLATE[value]){
that.vm_template_json.VMTEMPLATE.TEMPLATE[value] = roleTemplate[value];
} else {
if (!Array.isArray(that.vm_template_json.VMTEMPLATE.TEMPLATE[value])){
that.vm_template_json.VMTEMPLATE.TEMPLATE[value] = [that.vm_template_json.VMTEMPLATE.TEMPLATE[value]];
}
if (Array.isArray(roleTemplate[value])){
$.each(roleTemplate[value], function(rkey, rvalue){
that.vm_template_json.VMTEMPLATE.TEMPLATE[value].push(rvalue);
});
} else {
that.vm_template_json.VMTEMPLATE.TEMPLATE[value].push(roleTemplate[value]);
}
var append = roleTemplate.APPEND.split(",");
$.each(append, function(key, value){
if (!that.vm_template_json.VMTEMPLATE.TEMPLATE[value]){
that.vm_template_json.VMTEMPLATE.TEMPLATE[value] = roleTemplate[value];
} else {
if (!Array.isArray(that.vm_template_json.VMTEMPLATE.TEMPLATE[value])){
that.vm_template_json.VMTEMPLATE.TEMPLATE[value] = [that.vm_template_json.VMTEMPLATE.TEMPLATE[value]];
}
delete roleTemplate[value];
});
delete roleTemplate.APPEND;
}
if (Array.isArray(roleTemplate[value])){
$.each(roleTemplate[value], function(rkey, rvalue){
that.vm_template_json.VMTEMPLATE.TEMPLATE[value].push(rvalue);
});
} else {
that.vm_template_json.VMTEMPLATE.TEMPLATE[value].push(roleTemplate[value]);
}
}
delete roleTemplate[value];
});
delete roleTemplate.APPEND;
$.extend(true, that.vm_template_json.VMTEMPLATE.TEMPLATE, roleTemplate);
//}
}
if (vm_template_json.VMTEMPLATE.TEMPLATE["MEMORY_COST"] && vm_template_json.VMTEMPLATE.TEMPLATE["MEMORY_UNIT_COST"] && vm_template_json.VMTEMPLATE.TEMPLATE["MEMORY_UNIT_COST"] === "GB") {
vm_template_json.VMTEMPLATE.TEMPLATE["MEMORY_COST"] = vm_template_json.VMTEMPLATE.TEMPLATE["MEMORY_COST"]*1024;
}
@ -228,43 +227,11 @@ define(function(require) {
var extra_info = {
"merge_template": {}
};
var tmp_json = WizardFields.retrieve($("#instantiate_service_user_inputs", context));
var network_values = [];
var prefix = "type_";
var networks = Object.keys(tmp_json).filter(function(k) {
return k.indexOf('type_') == 0;
}).reduce(function(newData, k) {
var key = "id";
switch (tmp_json[k]) {
case "create":
key = "template_id";
break;
case "reserve":
key = "reserve_from";
break;
default:
break;
}
var internal = {};
internal[k.replace(prefix,"")] = {};
internal[k.replace(prefix,"")][key] = tmp_json[k.replace(prefix,"")];
if(tmp_json[k] === "create" || tmp_json[k] === "reserve"){
internal[k.replace(prefix,"")].extra = tmp_json["extra_"+k.replace(prefix,"")];
}
newData[k.replace(prefix,"")] = internal;
return newData;
}, {});
extra_info.merge_template.custom_attrs_values = tmp_json;
//parse to array
Object.keys(networks).map(function(key_network){
network_values.push(networks[key_network]);
});
//extra_info.merge_template.custom_attrs_values = tmp_json; //OLD
extra_info.merge_template.custom_attrs_values = {};
extra_info.merge_template.networks_values = network_values;
extra_info.merge_template.roles = [];
$.each(that.service_template_json.DOCUMENT.TEMPLATE.BODY.roles, function(index, role){

View File

@ -1157,7 +1157,7 @@ define(function(require) {
});
tab.off("click").on("click", ".provision_select_flow_template .provision-pricing-table.only-one" , function(){
tab.on("click", ".provision_select_flow_template .provision-pricing-table.only-one" , function(){
var context = $("#provision_create_flow");
if ($(this).hasClass("selected")){

View File

@ -30,7 +30,6 @@ define(function(require) {
var UsersTable = require("tabs/users-tab/datatable");
var GroupTable = require("tabs/groups-tab/datatable");
var OpenNebulaHost = require("opennebula/host");
var Leases = require("utils/leases");
/*
TEMPLATES
@ -90,8 +89,7 @@ define(function(require) {
'capacityCreateHTML': CapacityCreate.html(),
'logos': Config.vmLogos,
'usersDatatable': this.usersTable.dataTableHTML,
'groupDatatable': this.groupTable.dataTableHTML,
'leases': Leases.html()
'groupDatatable': this.groupTable.dataTableHTML
});
}
@ -108,8 +106,6 @@ define(function(require) {
.prop('wizard_field_disabled', true);
}
Leases.actions(panelForm);
if (panelForm.resource == "VirtualRouterTemplate"){
$("input[wizard_field=VROUTER]", context).attr("checked", "checked");
}
@ -118,11 +114,11 @@ define(function(require) {
}
function convertCostNumber(number){
if(number >= 1000000){
number = (number/1000000).toFixed(6);
number = (number/1000000).toFixed(6)
return number.toString()+"M";
}
else if(number >= 1000){
number = (number/1000).toFixed(6);
number = (number/1000).toFixed(6)
return number.toString()+"K";
}
return number.toFixed(6);

View File

@ -14,12 +14,7 @@
{{! limitations under the License. }}
{{! -------------------------------------------------------------------------- }}
<div class="row">
<div class="medium-12 columns text-center">
{{{leases}}}
</div>
</div>
<div class="row">
<div id="template_name_form" class="medium-6 columns">
<div id="template_name_form" class="medium-6 columns">
<label for="NAME">
{{tr "Name"}}
<input type="text" wizard_field="NAME" id="NAME" required/>

View File

@ -41,9 +41,9 @@ define(function(require) {
var UsersTable = require("tabs/users-tab/datatable");
var GroupTable = require("tabs/groups-tab/datatable");
var Humanize = require("utils/humanize");
var TemplateUtils = require("utils/template-utils");
var UniqueId = require("utils/unique-id");
var ScheduleActions = require("utils/schedule_action");
var Leases = require("utils/leases");
/*
CONSTANTS
@ -90,8 +90,7 @@ define(function(require) {
function _html() {
return TemplateHTML({
"formPanelId": this.formPanelId,
"leases": Leases.html()
"formPanelId": this.formPanelId
});
}
@ -604,8 +603,10 @@ define(function(require) {
function _onShow(context) {
Sunstone.disableFormPanelSubmit(this.tabId);
$("input.instantiate_pers", context).change();
var templatesContext = $(".list_of_templates", context);
templatesContext.html("");
Tips.setup(context);
return false;
}

View File

@ -36,9 +36,6 @@
</div>
</div>
<div class="row nameContainer">
<div class="medium-12 columns text-center">
{{{leases}}}
</div>
<div class="medium-4 columns">
<label for="vm_name">
{{tr "VM name"}}

View File

@ -13,6 +13,7 @@
{{! See the License for the specific language governing permissions and }}
{{! limitations under the License. }}
{{! -------------------------------------------------------------------------- }}
<div class="row">
<div class="large-6 columns">
<table class="dataTable">

View File

@ -23,9 +23,6 @@ define(function(require) {
var Locale = require('utils/locale');
var Tips = require('utils/tips');
var TemplatesTable = require('tabs/templates-tab/datatable');
var Leases = require("utils/leases");
var OpenNebulaAction = require("opennebula/action");
/*
CONSTANTS
*/
@ -77,38 +74,12 @@ define(function(require) {
$(".nameContainer", context).show();
$(".persistentContainer", context).show();
var templatesContext = $(".list_of_templates", context);
var templatesContext = $(".list_of_templates", context);
templatesContext.html("");
templatesContext.show();
var template_id = $(this).val();
if(template_id){
that.setTemplateIds(context, [template_id]);
var leasesThat = {};
function FormPanel() {
this.name = this.name;
}
Object.assign(leasesThat, that);
leasesThat.resource = "vm";
leasesThat.resourceId = template_id;
if(
OpenNebulaAction &&
OpenNebulaAction.cache &&
OpenNebulaAction.cache("VMTEMPLATE") &&
OpenNebulaAction.cache("VMTEMPLATE").data &&
OpenNebulaAction.cache("VMTEMPLATE").data[template_id] &&
OpenNebulaAction.cache("VMTEMPLATE").data[template_id].VMTEMPLATE &&
OpenNebulaAction.cache("VMTEMPLATE").data[template_id].VMTEMPLATE.TEMPLATE
){
leasesThat.jsonTemplate = OpenNebulaAction.cache("VMTEMPLATE").data[template_id].VMTEMPLATE.TEMPLATE;
}
leasesThat.__proto__ = FormPanel.prototype;
Leases.actions(leasesThat);
}
that.setTemplateIds(context, [template_id]);
});
Tips.setup(context);

View File

@ -126,7 +126,7 @@ define(function(require) {
var tmp_tmpl = new Array();
$.each(that.element.USER_TEMPLATE.SCHED_ACTION, function(i, element) {
if (element.ID && element.ID != index)
if (element.ID != index)
tmp_tmpl[i] = element;
});

View File

@ -20,7 +20,6 @@ define(function(require) {
*/
var Locale = require("utils/locale");
var Leases = require("utils/leases");
var Humanize = require("utils/humanize");
var RenameTr = require("utils/panel/rename-tr");
var PermissionsTable = require("utils/panel/permissions-table");
@ -53,9 +52,11 @@ define(function(require) {
function Panel(info) {
this.title = Locale.tr("Info");
this.icon = "fa-info-circle";
this.element = info[XML_ROOT];
return this;
}
};
Panel.PANEL_ID = PANEL_ID;
Panel.prototype.html = _html;
@ -140,8 +141,7 @@ define(function(require) {
"templateTableVcenterHTML": templateTableVcenterHTML,
"templateTableHTML": templateTableHTML,
"monitoringTableContentHTML": monitoringTableContentHTML,
"vrouterHTML": vrouterHTML,
"leases": Leases.html()
"vrouterHTML": vrouterHTML
});
}
@ -167,7 +167,7 @@ define(function(require) {
if($.isEmptyObject(strippedTemplateVcenter)){
$(".vcenter", context).hide();
}
Leases.actions(that,'vm','update');
TemplateTable.setup(strippedTemplate, RESOURCE, this.element.ID, context, unshownValues, strippedTemplateVcenter);
TemplateTableVcenter.setup(strippedTemplateVcenter, RESOURCE, this.element.ID, context, unshownValues, strippedTemplate);
}

View File

@ -15,9 +15,6 @@
{{! -------------------------------------------------------------------------- }}
<div class="row">
<div class="medium-12 columns text-center">
{{{leases}}}
</div>
<div class="large-6 columns">
<table class="dataTable">
<thead>

View File

@ -1,185 +0,0 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2019, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
/*
DEPENDENCIES
*/
var Locale = require('utils/locale');
var TemplateUtils = require("utils/template-utils");
var WizardFields = require('utils/wizard-fields');
var Sunstone = require('sunstone');
var ScheduleActions = require("utils/schedule_action");
var notifier = require("utils/notifier");
/*
CONSTANTS
*/
var classButton = 'button warning leases';
var idElementSchedActions = '#sched_temp_actions_body, #sched_inst_actions_body';
/*
CONSTRUCTOR
*/
return {
html: _html,
actions: _actions
};
/*
FUNCTION DEFINITIONS
*/
function _html(){
if(
config &&
config.system_config &&
config.system_config.leases &&
(config.system_config.leases.suspense || config.system_config.leases.terminate)
){
return $("<button />", {class: classButton}).text(Locale.tr("Add lease")).prop('outerHTML');
}
}
function parseVarToJqueryClass(constant){
return "."+constant.replace(/ /g, '.');
}
function _actions(form, res, act){
if(
form &&
form.constructor &&
form.constructor.name &&
(form.constructor.name === 'FormPanel' || form.constructor.name === 'Panel') &&
config &&
config.system_config &&
config.system_config.leases
){
$(parseVarToJqueryClass(classButton)).off("click").on("click", function(e){
e.preventDefault();
var confLeases = config.system_config.leases;
var confLeasesKeys = Object.keys(confLeases);
var showLeaseMessage = function(){
notifier.notifyCustom(Locale.tr("Added scheduled actions"),"");
};
var addInTemplate = function(){
var last = 0;
var pass = false;
confLeasesKeys.forEach(function(schedAction){
if(confLeases[schedAction] && confLeases[schedAction].time){
var schedActionTime = parseInt(confLeases[schedAction].time,10);
var newAction = {
TIME: last === 0? confLeases[schedAction].time : "+"+(schedActionTime+last),
ACTION: schedAction
};
last = schedActionTime;
$(idElementSchedActions).append(ScheduleActions.fromJSONtoActionsTable(newAction));
pass = true;
}
});
if(pass){
showLeaseMessage();
}
};
var type = form.constructor.name;
var resource = null;
var action = null;
var template = null;
var id = null;
switch (type) {
case 'FormPanel':
resource = form.resource || null;
action = form.action || null;
template = ( form.jsonTemplate ?
form.jsonTemplate
:
(
form.wizardElement ?
WizardFields.retrieve(form.wizardElement)
:
null
)
);
id = form.resourceId || null;
break;
case 'Panel':
resource = res || null;
action = act || null;
template = (form.element && form.element.USER_TEMPLATE? form.element.USER_TEMPLATE : null );
id = (form.element && form.element.ID? form.element.ID : null);
break;
default:
break;
}
if(resource && action && template && id){
switch (resource.toLowerCase()) {
case "template":
addInTemplate();
break;
case "vm":
if(action.toLowerCase() === "update"){
var newSchedActions = [];
var index = (
template && template.SCHED_ACTION ?
(Array.isArray(template.SCHED_ACTION)? template.SCHED_ACTION.length : 1)
:
0
);
var last = 0;
confLeasesKeys.forEach(function(schedAction){
if(confLeases[schedAction] && confLeases[schedAction].time){
var schedActionTime = parseInt(confLeases[schedAction].time,10);
newSchedActions.push(
{
ACTION: schedAction,
TIME: last === 0? confLeases[schedAction].time : "+"+(schedActionTime+last),
ID: (index++).toString()
}
);
last = schedActionTime;
}
});
template.SCHED_ACTION = (
template.SCHED_ACTION?
(Array.isArray(template.SCHED_ACTION)? template.SCHED_ACTION.concat(newSchedActions) : [template.SCHED_ACTION].concat(newSchedActions))
:
newSchedActions
);
template = TemplateUtils.templateToString(template);
Sunstone.runAction("VM.update_template", id, template);
showLeaseMessage();
}else{
addInTemplate();
}
break;
default:
break;
}
}
});
}else{
$(parseVarToJqueryClass(classButton)).off("click").remove();
}
}
});

View File

@ -177,7 +177,7 @@ define(function(require) {
'updateFn': _updateFn,
'list': _list,
'clearLabelsFilter': _clearLabelsFilter,
'getLabelsFilter': _getLabelsFilter
'getLabelsFilter': _getLabelsFilter,
}
return TabDatatable;

View File

@ -18,16 +18,12 @@ define(function(require) {
var Locale = require("utils/locale");
var TemplateUtils = require("utils/template-utils");
var VNetsTable = require("tabs/vnets-tab/datatable");
var VNetsTemplateTable = require("../tabs/vnets-templates-tab/datatable");
var RangeSlider = require("utils/range-slider");
var UniqueId = require("utils/unique-id");
var TemplateHTML = require("hbs!./user-inputs/table");
var RowTemplateHTML = require("hbs!./user-inputs/row");
var network_attrs = [];
var input_attrs = [];
//==============================================================================
// VM & Service user inputs
@ -236,6 +232,7 @@ define(function(require) {
opts.div = div;
opts.user_inputs = inputs;
opts.defaults = $.extend({}, template_json.VMTEMPLATE.TEMPLATE);
return _generateInstantiateUserInputs(opts);
}
@ -245,10 +242,7 @@ define(function(require) {
// returns true if at least one input was inserted
function _generateServiceTemplateUserInputs(div, template_json, opts) {
if(opts == undefined){
opts = {
select_networks: false,
pass: false
};
opts = {};
}
opts.div = div;
@ -272,6 +266,7 @@ define(function(require) {
if (defaults == undefined){
defaults = {};
}
div.empty();
var html = "";
@ -292,133 +287,64 @@ define(function(require) {
opts.network_header = Locale.tr("Network");
}
function checkItemInArray(object={}, list=[], index="name") {
var rtn = true;
if(typeof object === "object" && Array.isArray(list)){
if(
list.some(
function(item){
return (item[index] === object[index]);
}
)
){
rtn = false;
}
}
return rtn;
}
var network_attrs = [];
var input_attrs = [];
$.each(user_inputs, function(key, value) {
var attrs = _parse(key, value);
if (defaults[key] != undefined){
attrs.initial = opts.defaults[key];
}
if (attrs.type == "vnet_id"){
if(checkItemInArray(attrs, network_attrs, 'name')){
network_attrs.push(attrs);
}
network_attrs.push(attrs);
} else {
if(checkItemInArray(attrs, input_attrs, 'name')){
input_attrs.push(attrs);
}
input_attrs.push(attrs);
}
});
if (network_attrs.length > 0) {
html += "<fieldset>";
if (opts.network_header.length > 0) {
html += "<legend>" + opts.network_header + "</legend></div>";
html += "<legend>" +
opts.network_header +
"</legend>" +
"</div>";
}
html += "<div class=\"instantiate_user_inputs\"></div></fieldset>";
div.append(html);
var separator = $("<div>");
html += "<div class=\"instantiate_user_inputs\">" +
"</div>" +
"</fieldset>";
div.append(html);
var separator = "";
var vnetsTable;
$.each(network_attrs, function(index, vnet_attr) {
var unique_id = "vnet_user_input_" + UniqueId.id();
vnetsTable = new VNetsTable(unique_id, {"select": true});
if(opts && opts.select_networks){
$(".instantiate_user_inputs", div).append(
$("<div>", {class:"row"}).append(
$("<div>",{class: "large-12 large-centered columns"}).append(
separator.add(
$("<h5>").text(TemplateUtils.htmlEncode(vnet_attr.description)).add(
$("<div>",{class: "row"}).append(
$("<div>",{class:"columns small-12"}).append(
$("<select>",{
class: "changePlaceDatatable",
wizard_field: 'type_'+vnet_attr.name,
'data-nametable': vnet_attr.name,
'data-idtable': unique_id,
'data-id': index
}).append(
$("<option>",{value:"existing"}).text(Locale.tr("Existing")).add(
$("<option>", {value: "create"}).text(Locale.tr("Create"))
).add(
$("<option>", {value: "reserve"}).text(Locale.tr("Reserve"))
)
)
).add($("<div>",
{
class:"columns small-12",
id:"placeDatatable_"+index
}
).html(vnetsTable.dataTableHTML))
)
)
)
)
)
);
}
separator = $("<hr/>");
$(".instantiate_user_inputs", div).append(
"<div class=\"row\">" +
"<div class=\"large-12 large-centered columns\">" +
separator +
"<h5>" +
TemplateUtils.htmlEncode(vnet_attr.description) +
"</h5>" +
vnetsTable.dataTableHTML +
"</div>" +
"</div>");
separator = "<hr/>";
vnetsTable.initialize();
$("#refresh_button_" + unique_id).click();
vnetsTable.idInput().attr("wizard_field", vnet_attr.name).attr("required", "");
});
if(opts && opts.select_networks){
$(".changePlaceDatatable").change(function(e){
e.preventDefault();
var element = $(this);
var id = element.attr("data-id");
var idtable = element.attr("data-idtable");
var nametable = element.attr("data-nametable");
var value = element.val();
var place = $("#placeDatatable_"+id);
//create a table
if(value === "reserve" || value === "existing"){
var vnetsTable = new VNetsTable(idtable, {"select": true});
place.empty().append(vnetsTable.dataTableHTML);
vnetsTable.initialize();
$("#refresh_button_"+idtable).click();
vnetsTable.idInput().attr("wizard_field", nametable).attr("required", "");
}else{
var vnetsTemplateTable = new VNetsTemplateTable(idtable, {"select": true});
place.empty().append(vnetsTemplateTable.dataTableHTML);
vnetsTemplateTable.initialize();
$("#refresh_button_"+idtable).click();
vnetsTemplateTable.idInput().attr("wizard_field", nametable).attr("required", "");
}
// create input extra
if(value === "create" || value === "reserve"){
// falta colocar el render de las diferentes tablas!!!
if(!place.find(".addExtra_"+id).length){
place.append(
$("<div/>",{class:"row addExtra_"+id}).append(
$("<div/>",{class:"columns small-12"}).append(
$("<label/>").text(Locale.tr("Extra")).add(
$("<input/>",{wizard_field: "extra_"+nametable ,type:"text", name: "extra", id: "extra", placeholder: Locale.tr("Extra") })
)
)
)
);
}
}else{
place.find(".addExtra_"+id).remove();
}
});
}
}
if (input_attrs.length > 0) {
@ -436,7 +362,7 @@ define(function(require) {
div.append(html);
if(opts.defaults && opts.defaults.INPUTS_ORDER){
if(opts.defaults.INPUTS_ORDER){
var order = opts.defaults.INPUTS_ORDER;
var orderJSON = order.split(",");
$.each(orderJSON, function(key, value){
@ -463,22 +389,19 @@ define(function(require) {
} else {
$.each(input_attrs, function(index, custom_attr) {
var tooltip = "";
if(custom_attr && custom_attr.description){
if (custom_attr.type === "list-multiple"){
tooltip = " <span class=\"tip\">" + Locale.tr("Use ctrl key for multiple selection") + "</span>";
}
$(".instantiate_user_inputs", div).append(
"<div class=\"row\">" +
"<div class=\"large-12 large-centered columns\">" +
"<label>" +
TemplateUtils.htmlEncode(custom_attr.description) +
tooltip +
_attributeInput(custom_attr) +
"</label>" +
"</div>" +
"</div>"
);
if (custom_attr.type === "list-multiple"){
tooltip = " <span class=\"tip\">" + Locale.tr("Use ctrl key for multiple selection") + "</span>";
}
$(".instantiate_user_inputs", div).append(
"<div class=\"row\">" +
"<div class=\"large-12 large-centered columns\">" +
"<label>" +
TemplateUtils.htmlEncode(custom_attr.description) +
tooltip +
_attributeInput(custom_attr) +
"</label>" +
"</div>" +
"</div>");
});
}
}

View File

@ -45,8 +45,7 @@
'vnc_proxy_port' : '<%= $vnc.proxy_port %>',
'vnc_client_port' : '<%= $conf[:vnc_client_port] %>',
'allow_vnc_federation' : '<%= (!$conf[:allow_vnc_federation].nil?)? $conf[:allow_vnc_federation] : "no" %>',
'max_upload_file_size' : <%= $conf[:max_upload_file_size] ? $conf[:max_upload_file_size] : "undefined" %>,
'leases' : <%= $conf[:leases] ? $conf[:leases].to_json : "null" %>
'max_upload_file_size' : <%= $conf[:max_upload_file_size] ? $conf[:max_upload_file_size] : "undefined" %>
},
'view' : view,
'available_views' : available_views,