mirror of
https://github.com/OpenNebula/one.git
synced 2025-01-26 10:03:37 +03:00
F #4937: reorganize fsck code
This contains all these changes * Use IMAGE_STATES array from OCA * Including OpenNebula breaks fsck * Extract check_pool_control and check_users_groups * Extract check_clusters * Extract check_datastores * Move schema definitions to a hash * Move counter initialization to its own methods * Add comments to sections To see them use: grep -E '(DATA|FIX|TODO):' fsck.rb * More fsck comments * Move image checks to its own file * Move marketplace/app checks to its own file * Take out do_ prefix from functions * Move pool_control checks * Move user and group checks * Split check_clusters to external functions There are still functions that regenerate the whole table. This should be fixed later. Look for functions called check_fix_*. Look also for use of REXML instead of Nokogiri. This can be found searching for "Document.new". Some searches are done using plain SQL statements. These can be changed to Sequel functions. * Split check_datastores * Move VM checks * Move cluster_vnc_bitmap checks * Move history check * Move vrouter check * Move host checks * Move network checks * Move quota checks * Move template checks * Reorganize quota check code * Calculate quota for datastores with target SELF * Add fsck directory to install.sh * Bug in fsck quota code
This commit is contained in:
parent
6a2f13ede1
commit
5be92e323d
@ -1157,7 +1157,8 @@ ONEDB_FILES="src/onedb/fsck.rb \
|
||||
src/onedb/import_slave.rb \
|
||||
src/onedb/onedb.rb \
|
||||
src/onedb/onedb_backend.rb \
|
||||
src/onedb/sqlite2mysql.rb"
|
||||
src/onedb/sqlite2mysql.rb \
|
||||
src/onedb/fsck"
|
||||
|
||||
ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \
|
||||
src/onedb/shared/2.9.80_to_2.9.85.rb \
|
||||
|
2538
src/onedb/fsck.rb
2538
src/onedb/fsck.rb
File diff suppressed because it is too large
Load Diff
175
src/onedb/fsck/cluster.rb
Normal file
175
src/onedb/fsck/cluster.rb
Normal file
@ -0,0 +1,175 @@
|
||||
|
||||
module OneDBFsck

    # Load the cluster pool into @data_cluster.
    #
    # Each entry, keyed by cluster oid, keeps the cluster name plus the
    # host/datastore/vnet memberships that later checks will populate.
    def init_cluster
        clusters = @data_cluster = {}

        @db.fetch("SELECT oid, name FROM cluster_pool") do |row|
            clusters[row[:oid]] = {
                :name       => row[:name],
                :hosts      => [],
                :datastores => Set.new,
                :vnets      => Set.new
            }
        end
    end

    # DATA: FIX: rebuild each cluster body so its HOSTS, DATASTORES and
    # VNETS id lists match the memberships gathered in @data_cluster.
    # The whole table is regenerated (check_fix_* pattern): every row is
    # rewritten into cluster_pool_new and the tables swapped at the end.
    def check_fix_cluster
        clusters = @data_cluster

        create_table(:cluster_pool, :cluster_pool_new)

        @db.transaction do
            @db.fetch("SELECT * from cluster_pool") do |row|
                cid = row[:oid]
                doc = Document.new(row[:body])

                # Hosts: replace the HOSTS element with one built from the
                # collected memberships, reporting missing and excess ids
                old_hosts = doc.root.elements.delete("HOSTS")
                new_hosts = doc.root.add_element("HOSTS")

                clusters[cid][:hosts].each do |hid|
                    found = old_hosts.elements.delete("ID[.=#{hid}]")

                    if found.nil?
                        log_error("Host #{hid} is missing from Cluster #{cid} host id list")
                    end

                    new_hosts.add_element("ID").text = hid.to_s
                end

                old_hosts.each_element("ID") do |e|
                    log_error("Host #{e.text} is in Cluster #{cid} host id list, but it should not")
                end

                # Datastores: same rebuild, also ensuring a matching row
                # exists in cluster_datastore_relation
                old_ds = doc.root.elements.delete("DATASTORES")
                new_ds = doc.root.add_element("DATASTORES")

                clusters[cid][:datastores].each do |ds_id|
                    found = old_ds.elements.delete("ID[.=#{ds_id}]")

                    if found.nil?
                        log_error("Datastore #{ds_id} is missing from Cluster #{cid} datastore id list")
                    end

                    new_ds.add_element("ID").text = ds_id.to_s

                    relation = @db.fetch("SELECT * FROM cluster_datastore_relation WHERE cid=#{cid} AND oid=#{ds_id}")

                    next unless relation.empty?

                    log_error("Table cluster_datastore_relation is missing relation cluster #{cid}, datastore #{ds_id}")

                    @db[:cluster_datastore_relation].insert(
                        cid: cid,
                        oid: ds_id
                    )
                end

                old_ds.each_element("ID") do |e|
                    log_error("Datastore #{e.text} is in Cluster #{cid} datastore id list, but it should not")
                end

                # VNets: same rebuild against cluster_network_relation
                old_vnets = doc.root.elements.delete("VNETS")
                new_vnets = doc.root.add_element("VNETS")

                clusters[cid][:vnets].each do |vid|
                    found = old_vnets.elements.delete("ID[.=#{vid}]")

                    if found.nil?
                        log_error("VNet #{vid} is missing from Cluster #{cid} vnet id list")
                    end

                    new_vnets.add_element("ID").text = vid.to_s

                    relation = @db.fetch("SELECT * FROM cluster_network_relation WHERE cid=#{cid} AND oid=#{vid}")

                    next unless relation.empty?

                    log_error("Table cluster_network_relation is missing relation cluster #{cid}, vnet #{vid}")

                    @db[:cluster_network_relation].insert(
                        cid: cid,
                        oid: vid
                    )
                end

                old_vnets.each_element("ID") do |e|
                    log_error("VNet #{e.text} is in Cluster #{cid} vnet id list, but it should not")
                end

                row[:body] = doc.root.to_s

                # commit
                @db[:cluster_pool_new].insert(row)
            end
        end

        # Rename table
        @db.run("DROP TABLE cluster_pool")
        @db.run("ALTER TABLE cluster_pool_new RENAME TO cluster_pool")
    end

    # DATA: FIX: drop rows of cluster_datastore_relation and
    # cluster_network_relation that do not correspond to a membership in
    # @data_cluster. Both tables are regenerated and swapped in place.
    def check_fix_cluster_relations
        clusters = @data_cluster

        @db.transaction do
            create_table(:cluster_datastore_relation,
                         :cluster_datastore_relation_new)

            @db.fetch("SELECT * from cluster_datastore_relation") do |row|
                if clusters[row[:cid]][:datastores].count(row[:oid]) != 1
                    log_error("Table cluster_datastore_relation contains relation cluster #{row[:cid]}, datastore #{row[:oid]}, but it should not")
                else
                    @db[:cluster_datastore_relation_new].insert(row)
                end
            end

            @db.run("DROP TABLE cluster_datastore_relation")
            @db.run("ALTER TABLE cluster_datastore_relation_new RENAME TO cluster_datastore_relation")
        end

        log_time()

        @db.transaction do
            create_table(:cluster_network_relation,
                         :cluster_network_relation_new)

            @db.fetch("SELECT * from cluster_network_relation") do |row|
                if clusters[row[:cid]][:vnets].count(row[:oid]) != 1
                    log_error("Table cluster_network_relation contains relation cluster #{row[:cid]}, vnet #{row[:oid]}, but it should not")
                else
                    @db[:cluster_network_relation_new].insert(row)
                end
            end

            @db.run("DROP TABLE cluster_network_relation")
            @db.run("ALTER TABLE cluster_network_relation_new RENAME TO cluster_network_relation")
        end
    end
end
|
||||
|
40
src/onedb/fsck/cluster_vnc_bitmap.rb
Normal file
40
src/onedb/fsck/cluster_vnc_bitmap.rb
Normal file
@ -0,0 +1,40 @@
|
||||
|
||||
module OneDBFsck

    # DATA: CHECK: compare each cluster's stored VNC port bitmap against
    # the ports actually reserved by its VMs (@data_vm[:vnc], collected by
    # the VM checks) and record corrected, encoded maps in
    # @fixes_cluster_vnc_bitmap, keyed by cluster id.
    def check_cluster_vnc_bitmap
        fixes = @fixes_cluster_vnc_bitmap = {}
        cluster_vnc = @data_vm[:vnc]

        vnc_pool_size = 65536

        @db.fetch("SELECT * FROM cluster_pool") do |row|
            cluster_id = row[:oid]

            if cluster_vnc[cluster_id]
                # Build the bitmap string; character i (from the left)
                # represents port (vnc_pool_size - 1 - i)
                map = ""
                vnc_pool_size.times.each do |i|
                    map << (cluster_vnc[cluster_id].include?(vnc_pool_size - 1 - i) ? "1" : "0")
                end

                map_encoded = Base64::strict_encode64(Zlib::Deflate.deflate(map))
            else
                # Precomputed deflate+base64 encoding of the all-zero bitmap
                map_encoded = "eJztwYEAAAAAgCCl/ekWqQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqFo8C0Q=="
            end

            # NOTE: cluster_vnc_bitmap rows are keyed by column "id"
            old_map_encoded = @db[:cluster_vnc_bitmap].first(:id => cluster_id)[:map] rescue nil

            if old_map_encoded != map_encoded
                log_error("Cluster #{cluster_id} has not the proper reserved VNC ports")
                fixes[cluster_id] = map_encoded
            end
        end
    end

    # DATA: FIX: write the corrected bitmaps back to cluster_vnc_bitmap.
    def fix_cluster_vnc_bitmap
        @db.transaction do
            @fixes_cluster_vnc_bitmap.each do |id, map|
                # Bug fix: the table is keyed by "id" (see the lookup in
                # check_cluster_vnc_bitmap), not "oid"; filtering on a
                # non-existent "oid" column meant the fixes were never
                # applied (or errored, depending on backend).
                @db[:cluster_vnc_bitmap].where(id: id).update(map: map)
            end
        end
    end
end
|
||||
|
138
src/onedb/fsck/datastore.rb
Normal file
138
src/onedb/fsck/datastore.rb
Normal file
@ -0,0 +1,138 @@
|
||||
|
||||
module OneDBFsck

    # Load the datastore pool into @data_datastore: oid => name plus an
    # (initially empty) list of image ids that live in the datastore.
    def init_datastore_counters
        @data_datastore = {}

        @db.fetch("SELECT oid, name FROM datastore_pool") do |row|
            @data_datastore[row[:oid]] = {
                name:   row[:name],
                images: []
            }
        end
    end

    # DATA: CHECK: every cluster referenced from a datastore body exists.
    # References to unknown clusters are removed and patched bodies stored
    # in @fixes_datastore_cluster; valid references register the datastore
    # in the cluster's membership set.
    def check_datastore_cluster
        clusters = @data_cluster
        @fixes_datastore_cluster = {}

        @db.fetch("SELECT oid,body FROM datastore_pool") do |row|
            doc = nokogiri_doc(row[:body])

            doc.root.xpath("CLUSTERS/ID").each do |id_node|
                cid = id_node.text.to_i

                cluster_entry = clusters[cid]

                if cluster_entry.nil?
                    log_error("Datastore #{row[:oid]} is in cluster #{cid}, but it does not exist")

                    id_node.remove

                    @fixes_datastore_cluster[row[:oid]] = { body: doc.root.to_s }
                else
                    cluster_entry[:datastores] << row[:oid]
                end
            end
        end
    end

    # DATA: FIX: persist datastore bodies patched by check_datastore_cluster.
    def fix_datastore_cluster
        @db.transaction do
            @fixes_datastore_cluster.each do |id, entry|
                @db[:datastore_pool].where(oid: id).update(body: entry[:body])
            end
        end
    end

    # DATA: CHECK: every image points to an existing datastore and carries
    # its correct name. Valid images are registered in the datastore's
    # image list; name mismatches are queued in @fixes_datastore_image.
    def check_datastore_image
        datastores = @data_datastore
        @fixes_datastore_image = {}

        @db.fetch("SELECT oid,body FROM image_pool") do |row|
            doc = Document.new(row[:body])

            ds_id   = doc.root.get_text('DATASTORE_ID').to_s.to_i
            ds_name = doc.root.get_text('DATASTORE')

            next if ds_id == -1

            ds_entry = datastores[ds_id]

            if ds_entry.nil?
                # Not fixable automatically (second argument false)
                log_error("Image #{row[:oid]} has datastore #{ds_id}, but it does not exist. The image is probably unusable, and needs to be deleted manually:\n"<<
                    "  * The image contents should be deleted manually:\n"<<
                    "    #{doc.root.get_text('SOURCE')}\n"<<
                    "  * The DB entry can be then deleted with the command:\n"<<
                    "    DELETE FROM image_pool WHERE oid=#{row[:oid]};\n"<<
                    "  * Run fsck again.\n", false)
                next
            end

            if ds_name != ds_entry[:name]
                log_error("Image #{row[:oid]} has a wrong name for datastore #{ds_id}, #{ds_name}. It will be changed to #{ds_entry[:name]}")

                doc.root.each_element('DATASTORE') do |e|
                    e.text = ds_entry[:name]
                end

                @fixes_datastore_image[row[:oid]] = doc.root.to_s
            end

            ds_entry[:images] << row[:oid]
        end
    end

    # DATA: FIX: persist image bodies whose datastore name was corrected.
    def fix_datastore_image
        @db.transaction do
            @fixes_datastore_image.each do |id, body|
                @db[:image_pool].where(:oid => id).update(:body => body)
            end
        end
    end

    # DATA: FIX: rebuild each datastore's IMAGES id list from the images
    # registered in @data_datastore. The whole table is regenerated into
    # datastore_pool_new and swapped at the end (check_fix_* pattern).
    def check_fix_datastore
        datastores = @data_datastore

        create_table(:datastore_pool, :datastore_pool_new)

        @db.transaction do
            @db.fetch("SELECT * from datastore_pool") do |row|
                ds_id = row[:oid]
                doc   = Document.new(row[:body])

                old_images = doc.root.elements.delete("IMAGES")
                new_images = doc.root.add_element("IMAGES")

                datastores[ds_id][:images].each do |id|
                    found = old_images.elements.delete("ID[.=#{id}]")

                    if found.nil?
                        log_error("Image #{id} is missing from Datastore #{ds_id} image id list")
                    end

                    new_images.add_element("ID").text = id.to_s
                end

                old_images.each_element("ID") do |id_elem|
                    log_error("Image #{id_elem.text} is in Datastore #{ds_id} image id list, but it should not")
                end

                row[:body] = doc.root.to_s

                # commit
                @db[:datastore_pool_new].insert(row)
            end
        end

        # Rename table
        @db.run("DROP TABLE datastore_pool")
        @db.run("ALTER TABLE datastore_pool_new RENAME TO datastore_pool")
    end

end
|
||||
|
58
src/onedb/fsck/group.rb
Normal file
58
src/onedb/fsck/group.rb
Normal file
@ -0,0 +1,58 @@
|
||||
|
||||
module OneDBFsck

    # DATA: CHECK: each group's USERS id list matches the membership
    # collected in @data_user[:group]. Bodies needing changes are stored
    # in @fixes_group for fix_group.
    def check_group
        @fixes_group = groups_fix = {}

        memberships = @data_user[:group]

        @db.fetch("SELECT oid,body from group_pool") do |row|
            gid = row[:oid]
            doc = Nokogiri::XML(row[:body], nil, NOKOGIRI_ENCODING) do |c|
                c.default_xml.noblanks
            end

            # Detach the current USERS list and rebuild it from scratch.
            # NOTE(review): if USERS were ever absent, users_elem would be
            # nil and the lookups below would fail — assumed always present.
            users_elem = doc.root.at_xpath("USERS")
            users_elem.remove if !users_elem.nil?

            users_new_elem = doc.create_element("USERS")
            doc.root.add_child(users_new_elem)

            error_found = false

            memberships[gid].each do |id|
                id_elem = users_elem.at_xpath("ID[.=#{id}]")

                if id_elem.nil?
                    log_error("User #{id} is missing from Group #{gid} users id list", !db_version[:is_slave])
                    error_found = true
                else
                    id_elem.remove
                end

                users_new_elem.add_child(doc.create_element("ID")).content = id.to_s
            end

            # Whatever is left in the old list should not have been there
            users_elem.xpath("ID").each do |id_elem|
                log_error("User #{id_elem.text} is in Group #{gid} users id list, but it should not", !db_version[:is_slave])
                error_found = true
            end

            groups_fix[row[:oid]] = doc.root.to_s if error_found
        end
    end

    # DATA: FIX: write fixed group bodies back. Only done on a master;
    # a slave zone cannot modify the (federated) group pool.
    def fix_group
        groups_fix = @fixes_group

        if !db_version[:is_slave]
            @db.transaction do
                groups_fix.each do |id, body|
                    @db[:group_pool].where(:oid => id).update(:body => body)
                end
            end
        elsif !groups_fix.empty?
            log_msg("^ Group errors need to be fixed in the master OpenNebula")
        end
    end
end
|
73
src/onedb/fsck/history.rb
Normal file
73
src/onedb/fsck/history.rb
Normal file
@ -0,0 +1,73 @@
|
||||
|
||||
module OneDBFsck

    # Run both history checks, logging elapsed time in between.
    def check_history
        check_history_etime

        log_time

        check_history_opened
    end

    # DATA: check history etime
    #
    # Bug #4000 may cause history records with etime=0 when they should
    # be closed. The last history can be fixed with the VM etime, but
    # previous history entries need to be fixed manually — this check
    # only reports them (second log_error argument is false).
    def check_history_etime
        # Select history records that have etime = 0 but are not the
        # latest (MAX(seq)) record of their VM
        @db.fetch("SELECT vid,seq FROM history WHERE (etime = 0 AND seq <> (SELECT MAX(seq) FROM history AS subhistory WHERE history.vid = subhistory.vid) )") do |row|
            log_error("History record for VM #{row[:vid]} seq # #{row[:seq]} is not closed (etime = 0)", false)
        end
    end

    # DATA: go through all bad history records (etime=0) and ask
    # DATA: new time values to fix them; fixes queue up in @fixes_history.
    def check_history_opened
        history_fix = @fixes_history = []

        # Select the last history record (MAX(seq)) of every VM in state
        # DONE (6) whose etime is still 0
        @db.fetch("SELECT * FROM history WHERE (etime = 0 AND vid IN (SELECT oid FROM vm_pool WHERE state=6) AND seq = (SELECT MAX(seq) FROM history AS subhistory WHERE history.vid=subhistory.vid))") do |row|
            log_error("History record for VM #{row[:vid]} seq # #{row[:seq]} is not closed (etime = 0), but the VM is in state DONE")

            etime = 0

            # Take the end time from the VM body itself
            @db.fetch("SELECT body FROM vm_pool WHERE oid=#{row[:vid]}") do |vm_row|
                vm_doc = nokogiri_doc(vm_row[:body])

                etime = vm_doc.root.at_xpath("ETIME").text.to_i
            end

            history_doc = nokogiri_doc(row[:body])

            # Close every still-zeroed timestamp with the VM's etime
            ["RETIME", "ESTIME", "EETIME", "ETIME"].each do |att|
                elem = history_doc.root.at_xpath(att)

                elem.content = etime if elem.text == "0"
            end

            row[:body]  = history_doc.root.to_s
            row[:etime] = etime

            history_fix.push(row)
        end
    end

    # DATA: FIX: update history records with fixed data
    # DATA: TODO: check all fixes to always do the same (update vs rewrite)
    def fix_history
        @db.transaction do
            @fixes_history.each do |row|
                @db[:history].where(vid: row[:vid], seq: row[:seq]).update(row)
            end
        end
    end
end
|
||||
|
179
src/onedb/fsck/host.rb
Normal file
179
src/onedb/fsck/host.rb
Normal file
@ -0,0 +1,179 @@
|
||||
|
||||
module OneDBFsck

    # Initialize all the host counters to 0
    def init_host_counters
        @db.fetch("SELECT oid, name FROM host_pool") do |row|
            counters[:host][row[:oid]] = {
                :name   => row[:name],
                :memory => 0,
                :cpu    => 0,
                :rvms   => Set.new
            }
        end
    end

    # DATA: CHECK: each host's CLUSTER_ID/CLUSTER fields are consistent
    # with the denormalized cid column and with the actual cluster pool.
    # Fixes go to @fixes_host_cluster; valid hosts are registered in the
    # cluster's host list.
    def check_host_cluster
        clusters  = @data_cluster
        hosts_fix = @fixes_host_cluster = {}

        @db.fetch("SELECT oid,body,cid FROM host_pool") do |row|
            doc = Document.new(row[:body])

            cluster_id   = doc.root.get_text('CLUSTER_ID').to_s.to_i
            cluster_name = doc.root.get_text('CLUSTER')

            # The cid column must mirror the body's CLUSTER_ID
            if cluster_id != row[:cid]
                log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}")
                hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id}
            end

            next if cluster_id == -1

            cluster_entry = clusters[cluster_id]

            if cluster_entry.nil?
                log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but it does not exist")

                doc.root.each_element('CLUSTER_ID') do |e|
                    e.text = "-1"
                end

                doc.root.each_element('CLUSTER') do |e|
                    e.text = ""
                end

                hosts_fix[row[:oid]] = {:body => doc.root.to_s, :cid => -1}
            else
                if cluster_name != cluster_entry[:name]
                    log_error("Host #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}")

                    doc.root.each_element('CLUSTER') do |e|
                        e.text = cluster_entry[:name]
                    end

                    hosts_fix[row[:oid]] = {
                        body: doc.root.to_s,
                        cid:  cluster_id
                    }
                end

                cluster_entry[:hosts] << row[:oid]
            end
        end
    end

    # DATA: FIX: write fixed host bodies and cid columns back.
    def fix_host_cluster
        @db.transaction do
            @fixes_host_cluster.each do |id, entry|
                @db[:host_pool].where(oid: id).update(
                    body: entry[:body],
                    cid:  entry[:cid]
                )
            end
        end
    end

    # DATA: CHECK: recompute each host's RUNNING_VMS, VM id list, PCI
    # assignments, CPU_USAGE and MEM_USAGE from the counters gathered
    # while scanning the VM pool. Hosts needing changes go to @fixes_host.
    def check_host
        @fixes_host = {}

        @db[:host_pool].each do |row|
            doc = nokogiri_doc(row[:body])

            error = false

            hid = row[:oid]

            host_counters = counters[:host][hid]

            rvms      = host_counters[:rvms].size
            cpu_usage = (host_counters[:cpu]*100).to_i
            mem_usage = host_counters[:memory]*1024

            # rewrite running_vms
            doc.root.xpath("HOST_SHARE/RUNNING_VMS").each do |e|
                next if e.text == rvms.to_s

                log_error("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}")
                e.content = rvms
                error = true
            end

            # re-do list of VM IDs
            vms_elem = doc.root.at_xpath("VMS").remove

            vms_new_elem = doc.create_element("VMS")
            doc.root.add_child(vms_new_elem)

            host_counters[:rvms].each do |id|
                id_elem = vms_elem.at_xpath("ID[.=#{id}]")

                if id_elem.nil?
                    log_error("VM #{id} is missing from Host #{hid} VM id list")
                    error = true
                else
                    id_elem.remove
                end

                vms_new_elem.add_child(doc.create_element("ID")).content = id.to_s
            end

            vms_elem.xpath("ID").each do |id_elem|
                log_error("VM #{id_elem.text} is in Host #{hid} VM id list, but it should not")
                error = true
            end

            # Release PCI devices still assigned to VMs not running here
            doc.root.xpath("HOST_SHARE/PCI_DEVICES/PCI").each do |pci|
                next if pci.at_xpath("VMID").nil?

                vmid = pci.at_xpath("VMID").text.to_i

                if vmid != -1 && !host_counters[:rvms].include?(vmid)
                    log_error("VM #{vmid} has a PCI device assigned in host #{hid}, but it should not. Device: #{pci.at_xpath('DEVICE_NAME').text}")
                    pci.at_xpath("VMID").content = "-1"
                    error = true
                end
            end

            # rewrite cpu
            doc.root.xpath("HOST_SHARE/CPU_USAGE").each do |e|
                next if e.text == cpu_usage.to_s

                log_error("Host #{hid} CPU_USAGE has #{e.text} \tis\t#{cpu_usage}")
                e.content = cpu_usage
                error = true
            end

            # rewrite memory
            doc.root.xpath("HOST_SHARE/MEM_USAGE").each do |e|
                next if e.text == mem_usage.to_s

                log_error("Host #{hid} MEM_USAGE has #{e.text} \tis\t#{mem_usage}")
                e.content = mem_usage
                error = true
            end

            @fixes_host[hid] = doc.root.to_s if error
        end
    end

    # DATA: FIX: write fixed host bodies back.
    def fix_host
        @db.transaction do
            @fixes_host.each do |id, body|
                @db[:host_pool].where(oid: id).update(body: body)
            end
        end
    end
end
|
||||
|
191
src/onedb/fsck/image.rb
Normal file
191
src/onedb/fsck/image.rb
Normal file
@ -0,0 +1,191 @@
|
||||
|
||||
module OneDBFsck

    # Init image counters
    #
    # Creates a counters[:image] entry (vms/clones/app_clones sets) for
    # every image, and registers clone relationships found in CLONING_ID.
    def init_image_counters
        @db.fetch("SELECT oid,body FROM image_pool") do |row|
            if counters[:image][row[:oid]].nil?
                counters[:image][row[:oid]] = {
                    :vms        => Set.new,
                    :clones     => Set.new,
                    :app_clones => Set.new
                }
            end

            doc = Document.new(row[:body])

            doc.root.each_element("CLONING_ID") do |e|
                parent_id = e.text.to_i

                # The cloned-from image may not have been visited yet
                if counters[:image][parent_id].nil?
                    counters[:image][parent_id] = {
                        :vms        => Set.new,
                        :clones     => Set.new,
                        :app_clones => Set.new
                    }
                end

                counters[:image][parent_id][:clones].add(row[:oid])
            end
        end
    end

    # DATA: CHECK: recompute every image's RUNNING_VMS, VMS list, CLONES,
    # APP_CLONES and STATE from the gathered counters. Every rewritten
    # body is stored in @fixes_image and persisted by fix_image.
    def check_image
        @fixes_image = {}

        @db.transaction do
            @db[:image_pool].each do |row|
                doc = Document.new(row[:body])

                oid = row[:oid]

                persistent    = ( doc.root.get_text('PERSISTENT').to_s == "1" )
                current_state = doc.root.get_text('STATE').to_s.to_i

                img_counters = counters[:image][oid]

                rvms          = img_counters[:vms].size
                n_cloning_ops = img_counters[:clones].size + img_counters[:app_clones].size

                # DATA: CHECK: running vm counter with this image
                # rewrite running_vms
                doc.root.each_element("RUNNING_VMS") do |e|
                    if e.text != rvms.to_s
                        log_error("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}")
                        e.text = rvms
                    end
                end

                # re-do list of VM IDs
                old_vms = doc.root.elements.delete("VMS")
                new_vms = doc.root.add_element("VMS")

                # DATA: CHECK: running vm list with this image
                img_counters[:vms].each do |id|
                    if old_vms.elements.delete("ID[.=#{id}]").nil?
                        log_error("VM #{id} is missing from Image #{oid} VM id list")
                    end

                    new_vms.add_element("ID").text = id.to_s
                end

                old_vms.each_element("ID") do |id_elem|
                    log_error("VM #{id_elem.text} is in Image #{oid} VM id list, but it should not")
                end

                # A persistent image in use cannot be cloned
                if ( persistent && rvms > 0 )
                    n_cloning_ops = 0
                    img_counters[:clones]     = Set.new
                    img_counters[:app_clones] = Set.new
                end

                # DATA: CHECK: Check number of clones
                doc.root.each_element("CLONING_OPS") do |e|
                    if e.text != n_cloning_ops.to_s
                        log_error("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}")
                        e.text = n_cloning_ops
                    end
                end

                # re-do list of Images cloning this one
                old_clones = doc.root.elements.delete("CLONES")
                new_clones = doc.root.add_element("CLONES")

                # DATA: CHECK: image clones (is it used?)
                img_counters[:clones].each do |id|
                    if old_clones.elements.delete("ID[.=#{id}]").nil?
                        log_error("Image #{id} is missing from Image #{oid} CLONES id list")
                    end

                    new_clones.add_element("ID").text = id.to_s
                end

                old_clones.each_element("ID") do |id_elem|
                    log_error("Image #{id_elem.text} is in Image #{oid} CLONES id list, but it should not")
                end

                # re-do list of Apps cloning this one
                old_app_clones = doc.root.elements.delete("APP_CLONES")
                new_app_clones = doc.root.add_element("APP_CLONES")

                # DATA: CHECK: check app clones
                # DATA: TODO: understand app clones and image clones
                img_counters[:app_clones].each do |id|
                    if old_app_clones.elements.delete("ID[.=#{id}]").nil?
                        log_error("Marketplace App #{id} is missing from Image #{oid} APP_CLONES id list")
                    end

                    new_app_clones.add_element("ID").text = id.to_s
                end

                old_app_clones.each_element("ID") do |id_elem|
                    log_error("Marketplace App #{id_elem.text} is in Image #{oid} APP_CLONES id list, but it should not")
                end

                # DATA: Check state
                # DATA: TODO: Error state is taken into account?
                state = current_state

                if persistent
                    if ( rvms > 0 )
                        state = 8 # USED_PERS
                    elsif ( n_cloning_ops > 0 )
                        state = 6 # CLONE
                    elsif ( current_state == 8 || current_state == 6 )
                        # rvms == 0 && n_cloning_ops == 0, but image is in
                        # state USED_PERS or CLONE
                        state = 1 # READY
                    end
                else
                    if ( rvms > 0 || n_cloning_ops > 0 )
                        state = 2 # USED
                    elsif ( current_state == 2 )
                        # rvms == 0 && n_cloning_ops == 0, but image is in
                        # state USED
                        state = 1 # READY
                    end
                end

                doc.root.each_element("STATE") do |e|
                    if e.text != state.to_s
                        log_error("Image #{oid} has STATE " <<
                                  OpenNebula::Image::IMAGE_STATES[e.text.to_i] <<
                                  " \tis\t#{OpenNebula::Image::IMAGE_STATES[state]}")
                        e.text = state
                    end
                end

                @fixes_image[oid] = doc.root.to_s
            end
        end
    end

    # DATA: FIX: write every recomputed image body back to image_pool.
    def fix_image
        @fixes_image.each do |oid, body|
            @db[:image_pool].where(oid: oid).update(body: body)
        end
    end

end
|
90
src/onedb/fsck/marketplace.rb
Normal file
90
src/onedb/fsck/marketplace.rb
Normal file
@ -0,0 +1,90 @@
|
||||
|
||||
module OneDBFsck

    # Needs:
    #
    #   @data_marketplaceapp: set by check_marketplaceapp
    #
    # Sets:
    #
    #   @fixes_marketplace: consumed by fix_marketplace

    # DATA: check marketplace pool: rebuild each marketplace's
    # MARKETPLACEAPPS id list from the collected app data and make sure
    # ZONE_ID is valid. Bodies that need changes go to @fixes_marketplace.
    def check_marketplace
        marketplace = @data_marketplaceapp[:marketplace]

        # Bug fix: the old "markets_fix = {}" local was dead code (fixes
        # were always stored in @fixes_marketplace), and fix_marketplace
        # referenced that undefined local. Only @fixes_marketplace is used.
        @fixes_marketplace = {}

        @db.fetch("SELECT oid,body FROM marketplace_pool") do |row|
            market_id = row[:oid]
            doc = Nokogiri::XML(row[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}

            # Detach the current app list and rebuild it from scratch
            apps_elem = doc.root.at_xpath("MARKETPLACEAPPS")
            apps_elem.remove if !apps_elem.nil?

            apps_new_elem = doc.create_element("MARKETPLACEAPPS")
            doc.root.add_child(apps_new_elem)

            error = false

            # DATA: CHECK: are all apps in the marketplace?
            marketplace[market_id][:apps].each do |id|
                id_elem = apps_elem.at_xpath("ID[.=#{id}]")

                if id_elem.nil?
                    error = true

                    log_error(
                        "Marketplace App #{id} is missing from Marketplace #{market_id} "<<
                        "app id list")
                else
                    id_elem.remove
                end

                apps_new_elem.add_child(doc.create_element("ID")).content = id.to_s
            end

            # DATA: CHECK: listed apps that don't belong to the marketplace
            apps_elem.xpath("ID").each do |id_elem|
                error = true

                log_error(
                    "Marketplace App #{id_elem.text} is in Marketplace #{market_id} "<<
                    "app id list, but it should not")
            end

            zone_elem = doc.root.at_xpath("ZONE_ID")

            # DATA: CHECK: zone id
            if (zone_elem.nil? || zone_elem.text == "-1")
                error = true

                log_error("Marketplace #{market_id} has an invalid ZONE_ID. Will be set to 0")

                if (zone_elem.nil?)
                    zone_elem = doc.root.add_child(doc.create_element("ZONE_ID"))
                end

                zone_elem.content = "0"
            end

            if (error)
                @fixes_marketplace[row[:oid]] = doc.root.to_s
            end
        end
    end

    # DATA: FIX: update each marketplace that needs fixing. Only done on a
    # master; a slave zone cannot modify the (federated) marketplace pool.
    def fix_marketplace
        if !db_version[:is_slave]
            @db.transaction do
                @fixes_marketplace.each do |id, body|
                    @db[:marketplace_pool].where(:oid => id).update(:body => body)
                end
            end
        elsif !@fixes_marketplace.empty?
            # Bug fix: this branch used to reference the undefined local
            # "markets_fix" and raised NameError on slave zones that had
            # pending marketplace fixes.
            log_msg("^ Marketplace errors need to be fixed in the master OpenNebula")
        end
    end
end
|
||||
|
86
src/onedb/fsck/marketplaceapp.rb
Normal file
86
src/onedb/fsck/marketplaceapp.rb
Normal file
@ -0,0 +1,86 @@
|
||||
|
||||
module OneDBFsck
|
||||
# Sets:
|
||||
#
|
||||
# @data_marketplaceapp: used also by marketplace check
|
||||
# @fixes_marketplaceapp: used by do_check_marketplaceapp
|
||||
|
||||
def check_marketplaceapp
    # Holds data shared with the marketplace check: for each marketplace,
    # its name and the list of app oids that reference it.
    @data_marketplaceapp = {
        marketplace: {}
    }

    marketplace = @data_marketplaceapp[:marketplace]

    # DATA: create marketplace hash with its name and empty apps array
    @db.fetch("SELECT oid, name FROM marketplace_pool") do |row|
        marketplace[row[:oid]] = {:name => row[:name], :apps => []}
    end

    # Bodies to be rewritten by fix_marketplaceapp, keyed by app oid.
    @fixes_marketplaceapp = {}
    apps_fix = @fixes_marketplaceapp

    # DATA: go through all apps
    # NOTE(review): this file parses with REXML (Document.new / get_text /
    # each_element) while most fsck code uses Nokogiri — candidate for
    # later unification (see commit notes).
    @db.fetch("SELECT oid,body FROM marketplaceapp_pool") do |row|
        doc = Document.new(row[:body])

        market_id   = doc.root.get_text('MARKETPLACE_ID').to_s.to_i
        market_name = doc.root.get_text('MARKETPLACE')

        ####################################################################
        # DATA: TODO, BUG: this code will only work for a standalone oned.
        # In a federation, the image ID will refer to a different image
        # in each zone
        ####################################################################

        # DATA: get image origin id. Does it work?
        # A LOCKED app (STATE == 2) that was cloned from an image keeps a
        # reference so the image counters can track app clones.
        origin_id = doc.root.get_text('ORIGIN_ID').to_s.to_i
        if origin_id >= 0 && doc.root.get_text('STATE').to_s.to_i == 2 # LOCKED
            counters[:image][origin_id][:app_clones].add(row[:oid])
        end

        ####################################################################
        #####################################################################

        if market_id != -1
            market_entry = marketplace[market_id]

            # DATA: CHECK: does marketplace for this app exist?
            # Not auto-fixable: the user must delete the orphan row manually.
            if market_entry.nil?
                log_error("Marketplace App #{row[:oid]} has marketplace #{market_id}, but it does not exist. The app is probably unusable, and needs to be deleted manually:\n"<<
                    " * The DB entry can be deleted with the command:\n"<<
                    " DELETE FROM marketplaceapp_pool WHERE oid=#{row[:oid]};\n"<<
                    " * Run fsck again.\n", false)
            else
                # DATA: CHECK: marketplace name is correct
                if market_name != market_entry[:name]
                    log_error("Marketplace App #{row[:oid]} has a wrong name for marketplace #{market_id}, #{market_name}. It will be changed to #{market_entry[:name]}")

                    doc.root.each_element('MARKETPLACE') do |e|
                        e.text = market_entry[:name]
                    end

                    apps_fix[row[:oid]] = doc.root.to_s
                end

                # DATA: Add app to marketplace list. Used in marketplace check
                market_entry[:apps] << row[:oid]
            end
        end
    end
end
|
||||
|
||||
# DATA: FIX: fix marketplace app data.
#
# Persists the bodies queued by check_marketplaceapp.  On a federation
# slave the pool can only be fixed on the master, so just warn.
#
# BUG FIX: the slave branch referenced +apps_fix+, which is a local
# variable of check_marketplaceapp and is not in scope here (NameError);
# it must use @fixes_marketplaceapp.
def fix_marketplaceapp
    if !db_version[:is_slave]
        @db.transaction do
            @fixes_marketplaceapp.each do |id, body|
                @db[:marketplaceapp_pool].where(:oid => id).update(:body => body)
            end
        end
    elsif !@fixes_marketplaceapp.empty?
        log_msg("^ Marketplace App errors need to be fixed in the master OpenNebula")
    end
end
|
||||
end
|
||||
|
471
src/onedb/fsck/network.rb
Normal file
471
src/onedb/fsck/network.rb
Normal file
@ -0,0 +1,471 @@
|
||||
|
||||
module OneDBFsck
|
||||
|
||||
# Init vnet counters
|
||||
# Seed counters[:vnet] for every virtual network: one empty lease map per
# address range (keyed by AR_ID), plus a bucket for leases that cannot be
# matched to any AR.  Later checks fill and compare these maps.
def init_network_counters
    @db.fetch("SELECT oid,body FROM network_pool") do |row|
        doc = nokogiri_doc(row[:body])

        # Build { ar_id => {} } for every AR declared in the body.
        leases_by_ar = doc.root.xpath("AR_POOL/AR/AR_ID")
                          .each_with_object({}) do |ar_id, acc|
            acc[ar_id.text.to_i] = {}
        end

        counters[:vnet][row[:oid]] = {
            :ar_leases    => leases_by_ar,
            :no_ar_leases => {}
        }
    end
end
|
||||
|
||||
# DATA: CHECK: every cluster referenced from a VNet body exists.
# Stale cluster IDs are removed from the body and the fixed body is
# queued; valid references are recorded in @data_cluster so the cluster
# check can verify its vnets list.
def check_network_cluster
    cluster = @data_cluster

    # NOTE(review): this reuses the @fixes_host_cluster name for *VNet*
    # bodies — misleading, and it collides with the host/cluster check if
    # both run.  Kept as-is because fix_network_cluster reads this ivar;
    # rename both together in a later pass.
    @fixes_host_cluster = {}

    @db.fetch("SELECT oid,body FROM network_pool") do |row|
        doc = nokogiri_doc(row[:body])

        doc.root.xpath("CLUSTERS/ID").each do |e|
            cluster_id = e.text.to_i

            cluster_entry = cluster[cluster_id]

            if cluster_entry.nil?
                log_error("VNet #{row[:oid]} is in cluster " <<
                          "#{cluster_id}, but it does not exist")

                # Drop the dangling reference from the XML body.
                e.remove

                @fixes_host_cluster[row[:oid]] = { body: doc.root.to_s }
            else
                # Remembered so the cluster check can validate its vnet list.
                cluster_entry[:vnets] << row[:oid]
            end
        end
    end
end
|
||||
|
||||
# DATA: FIX: persist the VNet bodies whose stale cluster references were
# removed by check_network_cluster (collected in @fixes_host_cluster —
# the ivar name is historical; the entries are VNet documents).
#
# BUG FIX: the bodies come from network_pool rows, so they must be
# written back to network_pool.  The previous code updated host_pool,
# overwriting host entries with VNet XML.
def fix_network_cluster
    @db.transaction do
        @fixes_host_cluster.each do |id, entry|
            @db[:network_pool].where(oid: id).update(body: entry[:body])
        end
    end
end
|
||||
|
||||
# For every reservation VNet (pid <> -1), register one lease per address
# in each reserved AR into the *parent* network's counters, so the parent
# check can verify its ALLOCATED list.  MAC/IP/IPv6 addresses are derived
# from the AR's first MAC/IP exactly as oned derives them.
def init_network_lease_counters
    @db.fetch("SELECT oid,body,pid FROM network_pool WHERE pid<>-1") do |row|
        doc = nokogiri_doc(row[:body])

        parent_vnet = doc.root.at_xpath("PARENT_NETWORK_ID").text.to_i

        if (row[:pid] != parent_vnet)
            # TODO
        end

        doc.root.xpath("AR_POOL/AR").each do |ar|
            parent_ar_e = ar.at_xpath("PARENT_NETWORK_AR_ID")
            if !(parent_ar_e.nil? || parent_ar_e.text == "")

                parent_ar = parent_ar_e.text.to_i

                # Unfixable: the reservation points at a parent AR that is
                # gone; report only.
                if counters[:vnet][parent_vnet][:ar_leases][parent_ar].nil?
                    log_error(
                        "VNet #{row[:oid]} is using parent "<<
                        "VNet #{parent_vnet}, AR #{parent_ar}, "<<
                        "but the AR does not exist", false)
                end

                # MAC
                first_mac = mac_s_to_i(ar.at_xpath("MAC").text)

                # IP
                first_ip = nil
                if (!ar.at_xpath("IP").nil?)
                    first_ip = IPAddr.new(ar.at_xpath("IP").text.strip, Socket::AF_INET)
                end

                # IP6
                global_prefix = nil
                if !ar.at_xpath("GLOBAL_PREFIX").nil?
                    global_prefix = ip6_prefix_s_to_i(
                        ar.at_xpath("GLOBAL_PREFIX").text)
                end

                ula_prefix = nil
                if !ar.at_xpath("ULA_PREFIX").nil?
                    ula_prefix = ip6_prefix_s_to_i(
                        ar.at_xpath("ULA_PREFIX").text)
                end

                # Link-local prefix only applies to IPv6-capable ARs.
                link_prefix = nil
                type = ar.at_xpath("TYPE").text
                if ( type == "IP6" || type == "IP4_6" )
                    link_prefix = 0xfe80000000000000
                end

                # Parent vnet has a lease for each address of this reservation
                ar.at_xpath("SIZE").text.to_i.times do |index|

                    lease = {
                        :ip         => nil,
                        :ip6_global => nil,
                        :ip6_link   => nil,
                        :ip6_ula    => nil,
                        :mac        => nil,
                        :vm         => nil,
                        :vnet       => row[:oid],
                        :vrouter    => nil
                    }

                    #MAC
                    # Only the low 32 bits of the MAC are incremented, with
                    # wrap-around, matching oned's address arithmetic.
                    mac = (first_mac & 0xFFFF00000000) +
                          (((first_mac & 0xFFFFFFFF) + index) % 0x100000000)
                    lease[:mac] = mac_i_to_s(mac)

                    # IP
                    if (!first_ip.nil?)
                        lease[:ip] = IPAddr.new(first_ip.to_i + index,
                                                Socket::AF_INET).to_s
                    end

                    # IP6: EUI-64-style suffix derived from the MAC.
                    ip6_suffix = mac_to_ip6_suffix(mac)

                    if (!global_prefix.nil?)
                        lease[:ip6_global] = IPAddr.new(
                            (global_prefix << 64) | ip6_suffix,
                            Socket::AF_INET6 ).to_s
                    end

                    if (!ula_prefix.nil?)
                        lease[:ip6_ula] = IPAddr.new(
                            (ula_prefix << 64) | ip6_suffix,
                            Socket::AF_INET6 ).to_s
                    end

                    if (!link_prefix.nil?)
                        lease[:ip6_link] = IPAddr.new(
                            (link_prefix << 64) | ip6_suffix,
                            Socket::AF_INET6 ).to_s
                    end

                    counters[:vnet][parent_vnet][
                        :ar_leases][parent_ar][mac] = lease
                end
            end
        end
    end
end
|
||||
|
||||
# DATA: CHECK: VNet lease consistency.
#
# Compares the leases recorded in each VNet body (AR_POOL/AR/ALLOCATED)
# against the lease counters accumulated from VMs, VRouters and VNet
# reservations (counters[:vnet]).  Fixable problems — wrong owner object,
# wrong USED_LEASES, whitespace in the AR IP, missing TEMPLATE/VN_MAD —
# rewrite the body and queue it in @fixes_network; address mismatches are
# reported but cannot be fixed.
#
# BUG FIX: the +error+ flag used to be initialized *inside* the per-AR
# block, making it block-local to that iteration; errors detected at the
# AR level were discarded and the fixed body was only saved when the
# USED_LEASES/VN_MAD checks happened to fail too.  It is now initialized
# once per VNet row, so any error queues the body for fixing.
def check_network
    @fixes_network = {}

    @db[:network_pool].each do |row|
        doc = nokogiri_doc(row[:body])
        oid = row[:oid]

        used_leases = doc.root.at_xpath("USED_LEASES").text.to_i
        new_used_leases = 0

        counter_no_ar = counters[:vnet][row[:oid]][:no_ar_leases]

        # Whether this VNet's body was modified and must be saved.
        error = false

        counters[:vnet][row[:oid]][:ar_leases].each do |ar_id, counter_ar|
            net_ar = doc.root.at_xpath("AR_POOL/AR[AR_ID=#{ar_id}]")

            if (net_ar.nil?)
                # TODO shouldn't happen? counters were seeded from this
                # same body, so every counted AR should exist.
            end

            # MAC
            first_mac = mac_s_to_i(net_ar.at_xpath("MAC").text)

            # IP
            first_ip = nil
            if !net_ar.at_xpath("IP").nil?
                first_ip_st = net_ar.at_xpath("IP").text

                if (first_ip_st != first_ip_st.strip)
                    log_error("VNet #{oid} AR #{ar_id} "<<
                        "IP \"#{first_ip_st}\" contains whitespaces")
                    error = true

                    first_ip_st.strip!

                    net_ar.at_xpath("IP").content = first_ip_st
                end

                first_ip = IPAddr.new(first_ip_st, Socket::AF_INET)
            end

            # IP6
            global_prefix = nil
            if !net_ar.at_xpath("GLOBAL_PREFIX").nil?
                global_prefix = ip6_prefix_s_to_i(
                    net_ar.at_xpath("GLOBAL_PREFIX").text)
            end

            ula_prefix = nil
            if !net_ar.at_xpath("ULA_PREFIX").nil?
                ula_prefix = ip6_prefix_s_to_i(
                    net_ar.at_xpath("ULA_PREFIX").text)
            end

            link_prefix = nil
            type = net_ar.at_xpath("TYPE").text
            if ( type == "IP6" || type == "IP4_6" )
                link_prefix = 0xfe80000000000000
            end

            # Allocated leases: "<index> <binary_magic>" pairs, where
            # binary_magic encodes the owner type (VM/VNet/VRouter bit)
            # and oid.
            allocated_e = net_ar.at_xpath("ALLOCATED")

            allocated = allocated_e.nil? ? "" : allocated_e.text

            leases = allocated.scan(/(\d+) (\d+)/)

            new_leases = []

            leases.each do |lease_str|
                index = lease_str[0].to_i
                binary_magic = lease_str[1].to_i

                lease = {
                    :ip         => nil,
                    :ip6_global => nil,
                    :ip6_link   => nil,
                    :ip6_ula    => nil,
                    :mac        => nil,
                    :vm         => nil,
                    :vnet       => nil,
                    :vrouter    => nil
                }

                # MAC: increment low 32 bits only, with wrap-around.
                mac = (first_mac & 0xFFFF00000000) +
                      (((first_mac & 0xFFFFFFFF) + index) % 0x100000000)

                lease[:mac] = mac_i_to_s(mac)

                # IP
                if (!first_ip.nil?)
                    lease[:ip] = IPAddr.new(first_ip.to_i + index,
                                            Socket::AF_INET).to_s
                end

                # IP6
                ip6_suffix = mac_to_ip6_suffix(mac)

                if (!global_prefix.nil?)
                    lease[:ip6_global] = IPAddr.new(
                        (global_prefix << 64) | ip6_suffix,
                        Socket::AF_INET6 ).to_s
                end

                if (!ula_prefix.nil?)
                    lease[:ip6_ula] = IPAddr.new(
                        (ula_prefix << 64) | ip6_suffix,
                        Socket::AF_INET6 ).to_s
                end

                if (!link_prefix.nil?)
                    lease[:ip6_link] = IPAddr.new(
                        (link_prefix << 64) | ip6_suffix,
                        Socket::AF_INET6 ).to_s
                end

                # OID: decode owner from the binary magic bits.
                lease_oid = binary_magic & 0x00000000FFFFFFFF
                lease_obj = ""

                if (binary_magic & VM_BIN != 0)
                    lease[:vm] = lease_oid
                    lease_obj = "VM"
                elsif (binary_magic & NET_BIN != 0)
                    lease[:vnet] = lease_oid
                    lease_obj = "VNet"
                else #(binary_magic & VROUTER_BIN != 0)
                    lease[:vrouter] = lease_oid
                    lease_obj = "VRouter"
                end

                # Match against the counted lease for this MAC; fall back
                # to the leases that could not be assigned to an AR.
                counter_lease = counter_ar[mac]
                counter_ar.delete(mac)

                if counter_lease.nil?
                    counter_lease = counter_no_ar[mac]
                    counter_no_ar.delete(mac)
                end

                if counter_lease.nil?
                    # Nothing uses this lease: drop it unless it is on hold.
                    if(lease[:vm] != HOLD)
                        log_error(
                            "VNet #{oid} AR #{ar_id} has leased #{lease_to_s(lease)} "<<
                            "to #{lease_obj} #{lease_oid}, but it is actually free")

                        error = true
                    else
                        new_leases << lease_str
                    end
                else
                    if counter_lease != lease
                        # Things that can be fixed: wrong owner object.
                        if (counter_lease[:vm] != lease[:vm] ||
                            counter_lease[:vnet] != lease[:vnet] ||
                            counter_lease[:vrouter] != lease[:vrouter])

                            new_lease_obj = ""
                            new_lease_oid = 0
                            new_binary_magic = 0

                            if !counter_lease[:vm].nil?
                                new_lease_obj = "VM"
                                new_lease_oid = counter_lease[:vm].to_i

                                new_binary_magic = (VM_BIN |
                                    (new_lease_oid & 0xFFFFFFFF))
                            elsif !counter_lease[:vnet].nil?
                                new_lease_obj = "VNet"
                                new_lease_oid = counter_lease[:vnet].to_i

                                new_binary_magic = (NET_BIN |
                                    (new_lease_oid & 0xFFFFFFFF))
                            else #if !counter_lease[:vrouter].nil?
                                new_lease_obj = "VRouter"
                                new_lease_oid = counter_lease[:vrouter].to_i

                                new_binary_magic = (VROUTER_BIN |
                                    (new_lease_oid & 0xFFFFFFFF))
                            end

                            if (lease[:vm] == HOLD)
                                log_error(
                                    "VNet #{oid} AR #{ar_id} has lease "<<
                                    "#{lease_to_s(lease)} on hold, but it is "<<
                                    "actually used by "<<
                                    "#{new_lease_obj} #{new_lease_oid}")
                                error = true
                            else
                                log_error(
                                    "VNet #{oid} AR #{ar_id} has leased #{lease_to_s(lease)} "<<
                                    "to #{lease_obj} #{lease_oid}, but it is "<<
                                    "actually used by "<<
                                    "#{new_lease_obj} #{new_lease_oid}")
                                error = true
                            end

                            lease_str[1] = new_binary_magic.to_s
                        end

                        # Things that can't be fixed: address mismatches.
                        [:ip, :ip6_global, :ip6_link, :ip6_ula].each do |key|
                            if (counter_lease[key] != lease[key])
                                log_error(
                                    "VNet #{oid} AR #{ar_id} has a wrong "<<
                                    "lease for "<<
                                    "#{lease_obj} #{lease_oid}. #{key.to_s.upcase} "<<
                                    "does not match: "<<
                                    "#{counter_lease[key]} != #{lease[key]}. "<<
                                    "This can't be fixed", false)
                            end
                        end
                    end

                    new_leases << lease_str
                end
            end

            # Leases that are in use but missing from ALLOCATED: add them.
            counter_ar.each do |mac, counter_lease|
                index = ((mac & 0xFFFFFFFF) - (first_mac & 0xFFFFFFFF) ) % 0x100000000

                new_lease_obj = ""
                new_lease_oid = 0
                new_binary_magic = 0

                if !counter_lease[:vm].nil?
                    new_lease_obj = "VM"
                    new_lease_oid = counter_lease[:vm].to_i

                    new_binary_magic = (VM_BIN |
                        (new_lease_oid & 0xFFFFFFFF))
                elsif !counter_lease[:vnet].nil?
                    new_lease_obj = "VNet"
                    new_lease_oid = counter_lease[:vnet].to_i

                    new_binary_magic = (NET_BIN |
                        (new_lease_oid & 0xFFFFFFFF))
                else #if !counter_lease[:vrouter].nil?
                    new_lease_obj = "VRouter"
                    new_lease_oid = counter_lease[:vrouter].to_i

                    new_binary_magic = (VROUTER_BIN |
                        (new_lease_oid & 0xFFFFFFFF))
                end

                log_error("VNet #{oid} AR #{ar_id} does not have a lease "<<
                    "for #{mac_i_to_s(mac)}, but it is in use by "<<
                    "#{new_lease_obj} #{new_lease_oid}")

                error = true

                new_leases << [index.to_s, new_binary_magic.to_s]
            end

            new_used_leases += new_leases.size

            # NOTE(review): if ALLOCATED was absent but leases were added
            # from the counters, allocated_e is nil here and this raises —
            # confirm whether that combination can occur in practice.
            if new_leases.size > 0
                allocated_e.content = " #{new_leases.join(" ")}"
            else
                allocated_e.remove if !allocated_e.nil?
            end
        end

        if (new_used_leases != used_leases)
            log_error("VNet #{oid} has #{used_leases} used leases, "<<
                "but it is actually #{new_used_leases}")

            error = true

            doc.root.at_xpath("USED_LEASES").content =
                new_used_leases.to_s
        end

        # Leftover counted leases that matched no AR: report only.
        counter_no_ar.each do |mac, counter_lease|
            log_error("VM #{counter_lease[:vm]} has a lease from "<<
                "VNet #{oid}, but it could not be matched to any AR", false)
        end

        # VN_MAD must be present both at the top level and in TEMPLATE.
        vn_mad_e = doc.root.at_xpath("VN_MAD")
        if vn_mad_e.nil?
            log_error("VNet #{oid} VN_MAD element is missing", false)
        else
            vn_mad = vn_mad_e.text
            vn_mad_tmpl_e = doc.root.at_xpath("TEMPLATE/VN_MAD")

            if (vn_mad_tmpl_e.nil? || vn_mad_tmpl_e.text != vn_mad)
                log_error("VNet #{oid} VN_MAD element is missing from the TEMPLATE")

                error = true

                doc.root.at_xpath("TEMPLATE").add_child(
                    doc.create_element("VN_MAD")).content = vn_mad
            end
        end

        @fixes_network[oid] = doc.root.to_s if error
    end
end
|
||||
|
||||
# Persist the VNet bodies rewritten by check_network (@fixes_network)
# back to network_pool, all inside a single transaction.
def fix_network
    @db.transaction do
        @fixes_network.each_pair do |vnet_id, new_body|
            @db[:network_pool]
                .where(oid: vnet_id)
                .update(body: new_body)
        end
    end
end
|
||||
end
|
||||
|
63
src/onedb/fsck/pool_control.rb
Normal file
63
src/onedb/fsck/pool_control.rb
Normal file
@ -0,0 +1,63 @@
|
||||
|
||||
module OneDBFsck
|
||||
# DATA: CHECK: pool_control.last_oid must be >= the highest oid actually
# present in each pool table.  Stale entries are queued for
# fix_pool_control in @fixes_pool_control; federated tables on a slave
# can only be fixed on the master, so those are just reported.
def check_pool_control
    @fixes_pool_control = {}

    tables.each do |table|
        max_oid     = pool_control_max_oid(table)
        control_oid = pool_control_last_oid(table)

        next unless max_oid > control_oid

        msg = "pool_control for table #{table} has last_oid #{control_oid}, but it is #{max_oid}"

        if control_oid != -1 && db_version[:is_slave] &&
                federated_tables.include?(table)
            log_error(msg, false)
            log_msg("^ Needs to be fixed in the master OpenNebula")
        else
            # A missing pool_control row (control_oid == -1) is fixed
            # silently; a stale one is reported first.
            log_error(msg) if control_oid != -1

            @fixes_pool_control[table] = max_oid
        end
    end
end

# Highest oid present in +table+, or -1 when the table is empty.
def pool_control_max_oid(table)
    max_oid = -1

    @db.fetch("SELECT MAX(oid) FROM #{table}") do |row|
        max_oid = row[:"MAX(oid)"].to_i
    end

    # MAX(oid) reads as 0 both for an empty table and for a table whose
    # highest oid really is 0; disambiguate with a second query.
    if max_oid == 0
        max_oid = -1

        @db.fetch("SELECT oid FROM #{table} WHERE oid=0") do |_row|
            max_oid = 0
        end
    end

    max_oid
end

# last_oid recorded in pool_control for +table+, or -1 when absent.
def pool_control_last_oid(table)
    last_oid = -1

    @db.fetch("SELECT last_oid FROM pool_control WHERE tablename='#{table}'") do |row|
        last_oid = row[:last_oid].to_i
    end

    last_oid
end
|
||||
|
||||
# DATA: FIX: persist the last_oid values computed by check_pool_control.
#
# BUG FIX: the dataset must be the pool_control table itself; the
# previous code used @db[:image_pool], so the counters were never
# written to pool_control (and image_pool could be corrupted).
def fix_pool_control
    db = @db[:pool_control]

    @db.transaction do
        @fixes_pool_control.each do |name, last_oid|
            # UPDATE touches 0 rows when the table has no entry for this
            # pool yet; fall back to INSERT in that case.
            if 1 != db.where(tablename: name).update(last_oid: last_oid)
                db.insert(tablename: name, last_oid: last_oid)
            end
        end
    end
end
|
||||
end
|
406
src/onedb/fsck/quotas.rb
Normal file
406
src/onedb/fsck/quotas.rb
Normal file
@ -0,0 +1,406 @@
|
||||
|
||||
module OneDBFsck
|
||||
# DATA: CHECK/FIX: rebuild the user_quotas table, recomputing every
# non-oneadmin user's usage counters with calculate_quotas.
# NOTE(review): this always drops and regenerates the whole table, even
# when nothing is wrong — the commit notes flag these check_fix_*
# functions for later refactoring into separate check/fix phases.
def check_fix_user_quotas
    # This block is not needed for now
=begin
    @db.transaction do
        @db.fetch("SELECT oid FROM user_pool") do |row|
            found = false

            @db.fetch("SELECT user_oid FROM user_quotas WHERE user_oid=#{row[:oid]}") do |q_row|
                found = true
            end

            if !found
                log_error("User #{row[:oid]} does not have a quotas entry")

                @db.run "INSERT INTO user_quotas VALUES(#{row[:oid]},'<QUOTAS><ID>#{row[:oid]}</ID><DATASTORE_QUOTA></DATASTORE_QUOTA><NETWORK_QUOTA></NETWORK_QUOTA><VM_QUOTA></VM_QUOTA><IMAGE_QUOTA></IMAGE_QUOTA></QUOTAS>');"
            end
        end
    end
=end
    @db.run "ALTER TABLE user_quotas RENAME TO old_user_quotas;"
    create_table(:user_quotas)

    @db.transaction do
        # oneadmin does not have quotas
        @db.fetch("SELECT * FROM old_user_quotas WHERE user_oid=0") do |row|
            @db[:user_quotas].insert(row)
        end

        @db.fetch("SELECT * FROM old_user_quotas WHERE user_oid>0") do |row|
            doc = Nokogiri::XML(row[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}

            # Recompute usage for this user's objects (uid filter) and
            # patch the counters in the document in place.
            calculate_quotas(doc, "uid=#{row[:user_oid]}", "User")

            @db[:user_quotas].insert(
                :user_oid => row[:user_oid],
                :body     => doc.root.to_s)
        end
    end

    @db.run "DROP TABLE old_user_quotas;"
end
|
||||
|
||||
# DATA: CHECK/FIX: rebuild the group_quotas table, recomputing every
# non-oneadmin group's usage counters with calculate_quotas.
# Mirrors check_fix_user_quotas, filtering objects by gid instead of uid.
def check_fix_group_quotas
    # This block is not needed for now
=begin
    @db.transaction do
        @db.fetch("SELECT oid FROM group_pool") do |row|
            found = false

            @db.fetch("SELECT group_oid FROM group_quotas WHERE group_oid=#{row[:oid]}") do |q_row|
                found = true
            end

            if !found
                log_error("Group #{row[:oid]} does not have a quotas entry")

                @db.run "INSERT INTO group_quotas VALUES(#{row[:oid]},'<QUOTAS><ID>#{row[:oid]}</ID><DATASTORE_QUOTA></DATASTORE_QUOTA><NETWORK_QUOTA></NETWORK_QUOTA><VM_QUOTA></VM_QUOTA><IMAGE_QUOTA></IMAGE_QUOTA></QUOTAS>');"
            end
        end
    end
=end
    @db.run "ALTER TABLE group_quotas RENAME TO old_group_quotas;"
    create_table(:group_quotas)

    @db.transaction do
        # oneadmin does not have quotas
        @db.fetch("SELECT * FROM old_group_quotas WHERE group_oid=0") do |row|
            @db[:group_quotas].insert(row)
        end

        @db.fetch("SELECT * FROM old_group_quotas WHERE group_oid>0") do |row|
            doc = Nokogiri::XML(row[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}

            # Recompute usage for this group's objects (gid filter) and
            # patch the counters in the document in place.
            calculate_quotas(doc, "gid=#{row[:group_oid]}", "Group")

            @db[:group_quotas].insert(
                :group_oid => row[:group_oid],
                :body      => doc.root.to_s)
        end
    end

    @db.run "DROP TABLE old_group_quotas;"
end
|
||||
|
||||
# Recompute the resource usage recorded in a quotas document and fix the
# *_USED counters in place, logging every mismatch.
#
# doc          - Nokogiri XML document of the <QUOTAS> body; modified in place.
# where_filter - SQL predicate selecting the owner's objects, e.g. "uid=4"
#                or "gid=100".
# resource     - "User" or "Group"; only used in log messages.
def calculate_quotas(doc, where_filter, resource)
    oid = doc.root.at_xpath("ID").text.to_i

    # VM quotas
    cpu_used = 0
    mem_used = 0
    vms_used = 0
    sys_used = 0

    # VNet quotas
    vnet_usage = {}

    # Image quotas
    img_usage = {}
    # Extra datastore usage from disks with CLONE/LN target SELF,
    # added to the image datastore totals below.
    datastore_usage = {}

    # Walk all non-DONE VMs (state<>6) owned by this user/group.
    @db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row|
        vmdoc = nokogiri_doc(vm_row[:body])

        # VM quotas
        vmdoc.root.xpath("TEMPLATE/CPU").each { |e|
            # truncate to 2 decimals
            cpu = (e.text.to_f * 100).to_i
            cpu_used += cpu
        }

        vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e|
            mem_used += e.text.to_i
        }

        vmdoc.root.xpath("TEMPLATE/DISK").each { |e|
            type = ""

            e.xpath("TYPE").each { |t_elem|
                type = t_elem.text.upcase
            }

            size = 0

            if !e.at_xpath("SIZE").nil?
                size = e.at_xpath("SIZE").text.to_i
            end

            # Volatile disks always count against the system DS quota.
            # Persistent/cloned disks count against system or image DS
            # depending on the CLONE_TARGET/LN_TARGET of the disk.
            if ( type == "SWAP" || type == "FS")
                sys_used += size
            else
                if !e.at_xpath("CLONE").nil?
                    clone = (e.at_xpath("CLONE").text.upcase == "YES")

                    target = nil

                    if clone
                        target = e.at_xpath("CLONE_TARGET").text if !e.at_xpath("CLONE_TARGET").nil?
                    else
                        target = e.at_xpath("LN_TARGET").text if !e.at_xpath("LN_TARGET").nil?
                    end

                    if !target.nil? && target == "SYSTEM"
                        sys_used += size

                        if !e.at_xpath("DISK_SNAPSHOT_TOTAL_SIZE").nil?
                            sys_used += e.at_xpath("DISK_SNAPSHOT_TOTAL_SIZE").text.to_i
                        end
                    elsif !target.nil? && target == "SELF"
                        datastore_id = e.at_xpath("DATASTORE_ID").text
                        datastore_usage[datastore_id] ||= 0
                        datastore_usage[datastore_id] += size

                        if !e.at_xpath("DISK_SNAPSHOT_TOTAL_SIZE").nil?
                            datastore_usage[datastore_id] += e.at_xpath("DISK_SNAPSHOT_TOTAL_SIZE").text.to_i
                        end
                    end
                end
            end
        }

        vms_used += 1

        # VNet quotas
        vmdoc.root.xpath("TEMPLATE/NIC/NETWORK_ID").each { |e|
            vnet_usage[e.text] = 0 if vnet_usage[e.text].nil?
            vnet_usage[e.text] += 1
        }

        # Image quotas
        vmdoc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each { |e|
            img_usage[e.text] = 0 if img_usage[e.text].nil?
            img_usage[e.text] += 1
        }
    end

    # Virtual routers consume one lease per NIC with a floating IP.
    @db.fetch("SELECT body FROM vrouter_pool WHERE #{where_filter}") do |vrouter_row|
        vrouter_doc = Nokogiri::XML(vrouter_row[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}

        # VNet quotas
        vrouter_doc.root.xpath("TEMPLATE/NIC").each { |nic|
            net_id = nil
            nic.xpath("NETWORK_ID").each do |nid|
                net_id = nid.text
            end

            floating = false

            nic.xpath("FLOATING_IP").each do |floating_e|
                floating = (floating_e.text.upcase == "YES")
            end

            if !net_id.nil? && floating
                vnet_usage[net_id] = 0 if vnet_usage[net_id].nil?

                vnet_usage[net_id] += 1
            end
        }
    end

    # VM quotas

    vm_elem = nil
    doc.root.xpath("VM_QUOTA/VM").each { |e| vm_elem = e }

    # Missing VM quota section: rebuild it with unlimited (-1) limits
    # and zeroed usage; the usage is corrected below.
    if vm_elem.nil?
        doc.root.xpath("VM_QUOTA").each { |e| e.remove }

        vm_quota = doc.root.add_child(doc.create_element("VM_QUOTA"))
        vm_elem  = vm_quota.add_child(doc.create_element("VM"))

        vm_elem.add_child(doc.create_element("CPU")).content = "-1"
        vm_elem.add_child(doc.create_element("CPU_USED")).content = "0"

        vm_elem.add_child(doc.create_element("MEMORY")).content = "-1"
        vm_elem.add_child(doc.create_element("MEMORY_USED")).content = "0"

        vm_elem.add_child(doc.create_element("VMS")).content = "-1"
        vm_elem.add_child(doc.create_element("VMS_USED")).content = "0"

        vm_elem.add_child(doc.create_element("SYSTEM_DISK_SIZE")).content = "-1"
        vm_elem.add_child(doc.create_element("SYSTEM_DISK_SIZE_USED")).content = "0"
    end

    vm_elem.xpath("CPU_USED").each { |e|
        # Because of bug http://dev.opennebula.org/issues/1567 the element
        # may contain a float number in scientific notation.

        # Check if the float value or the string representation mismatch,
        # but ignoring the precision

        cpu_used = (cpu_used / 100.0)

        different = ( e.text.to_f != cpu_used ||
            ![sprintf('%.2f', cpu_used), sprintf('%.1f', cpu_used), sprintf('%.0f', cpu_used)].include?(e.text) )

        cpu_used_str = sprintf('%.2f', cpu_used)

        if different
            log_error("#{resource} #{oid} quotas: CPU_USED has #{e.text} \tis\t#{cpu_used_str}")
            e.content = cpu_used_str
        end
    }

    vm_elem.xpath("MEMORY_USED").each { |e|
        if e.text != mem_used.to_s
            log_error("#{resource} #{oid} quotas: MEMORY_USED has #{e.text} \tis\t#{mem_used}")
            e.content = mem_used.to_s
        end
    }

    vm_elem.xpath("VMS_USED").each { |e|
        if e.text != vms_used.to_s
            log_error("#{resource} #{oid} quotas: VMS_USED has #{e.text} \tis\t#{vms_used}")
            e.content = vms_used.to_s
        end
    }

    vm_elem.xpath("SYSTEM_DISK_SIZE_USED").each { |e|
        if e.text != sys_used.to_s
            log_error("#{resource} #{oid} quotas: SYSTEM_DISK_SIZE_USED has #{e.text} \tis\t#{sys_used}")
            e.content = sys_used.to_s
        end
    }

    # VNet quotas

    net_quota = nil
    doc.root.xpath("NETWORK_QUOTA").each { |e| net_quota = e }

    if net_quota.nil?
        net_quota = doc.root.add_child(doc.create_element("NETWORK_QUOTA"))
    end

    # Fix the usage of every network already present in the quota doc;
    # entries are consumed from vnet_usage as they are matched.
    net_quota.xpath("NETWORK").each { |net_elem|
        vnet_id = net_elem.at_xpath("ID").text

        leases_used = vnet_usage.delete(vnet_id)

        leases_used = 0 if leases_used.nil?

        net_elem.xpath("LEASES_USED").each { |e|
            if e.text != leases_used.to_s
                log_error("#{resource} #{oid} quotas: VNet #{vnet_id}\tLEASES_USED has #{e.text} \tis\t#{leases_used}")
                e.content = leases_used.to_s
            end
        }
    }

    # Networks in use but absent from the quota doc: add new entries.
    vnet_usage.each { |vnet_id, leases_used|
        log_error("#{resource} #{oid} quotas: VNet #{vnet_id}\tLEASES_USED has 0 \tis\t#{leases_used}")

        new_elem = net_quota.add_child(doc.create_element("NETWORK"))

        new_elem.add_child(doc.create_element("ID")).content = vnet_id
        new_elem.add_child(doc.create_element("LEASES")).content = "-1"
        new_elem.add_child(doc.create_element("LEASES_USED")).content = leases_used.to_s
    }

    # Image quotas

    img_quota = nil
    doc.root.xpath("IMAGE_QUOTA").each { |e| img_quota = e }

    if img_quota.nil?
        img_quota = doc.root.add_child(doc.create_element("IMAGE_QUOTA"))
    end

    img_quota.xpath("IMAGE").each { |img_elem|
        img_id = img_elem.at_xpath("ID").text

        rvms = img_usage.delete(img_id)

        rvms = 0 if rvms.nil?

        img_elem.xpath("RVMS_USED").each { |e|
            if e.text != rvms.to_s
                log_error("#{resource} #{oid} quotas: Image #{img_id}\tRVMS has #{e.text} \tis\t#{rvms}")
                e.content = rvms.to_s
            end
        }
    }

    # Images in use but absent from the quota doc: add new entries.
    img_usage.each { |img_id, rvms|
        log_error("#{resource} #{oid} quotas: Image #{img_id}\tRVMS has 0 \tis\t#{rvms}")

        new_elem = img_quota.add_child(doc.create_element("IMAGE"))

        new_elem.add_child(doc.create_element("ID")).content = img_id
        new_elem.add_child(doc.create_element("RVMS")).content = "-1"
        new_elem.add_child(doc.create_element("RVMS_USED")).content = rvms.to_s
    }

    # Datastore quotas
    # ds_usage maps datastore id -> [image count, total size], computed
    # from the owner's images (base size plus snapshot sizes).
    ds_usage = {}

    @db.fetch("SELECT body FROM image_pool WHERE #{where_filter}") do |img_row|
        img_doc = Nokogiri::XML(img_row[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}

        img_doc.root.xpath("DATASTORE_ID").each { |e|
            ds_usage[e.text] = [0,0] if ds_usage[e.text].nil?
            ds_usage[e.text][0] += 1

            img_doc.root.xpath("SIZE").each { |size|
                ds_usage[e.text][1] += size.text.to_i
            }

            img_doc.root.xpath("SNAPSHOTS/SNAPSHOT/SIZE").each { |size|
                ds_usage[e.text][1] += size.text.to_i
            }
        }
    end

    ds_quota = nil
    doc.root.xpath("DATASTORE_QUOTA").each { |e| ds_quota = e }

    if ds_quota.nil?
        ds_quota = doc.root.add_child(doc.create_element("DATASTORE_QUOTA"))
    end

    ds_quota.xpath("DATASTORE").each { |ds_elem|
        ds_id = ds_elem.at_xpath("ID").text

        images_used,size_used = ds_usage.delete(ds_id)

        images_used = 0 if images_used.nil?
        size_used   = 0 if size_used.nil?

        # Disks cloned to the image datastore (target SELF) add to the
        # datastore size usage on top of the images themselves.
        cloned_usage = datastore_usage[ds_id] || 0
        size_used += cloned_usage

        ds_elem.xpath("IMAGES_USED").each { |e|
            if e.text != images_used.to_s
                log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tIMAGES_USED has #{e.text} \tis\t#{images_used}")
                e.content = images_used.to_s
            end
        }

        ds_elem.xpath("SIZE_USED").each { |e|
            if e.text != size_used.to_s
                log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tSIZE_USED has #{e.text} \tis\t#{size_used}")
                e.content = size_used.to_s
            end
        }
    }

    # Datastores in use but absent from the quota doc: add new entries.
    ds_usage.each { |ds_id, array|
        images_used,size_used = array

        log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tIMAGES_USED has 0 \tis\t#{images_used}")
        log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tSIZE_USED has 0 \tis\t#{size_used}")

        new_elem = ds_quota.add_child(doc.create_element("DATASTORE"))

        new_elem.add_child(doc.create_element("ID")).content = ds_id

        new_elem.add_child(doc.create_element("IMAGES")).content = "-1"
        new_elem.add_child(doc.create_element("IMAGES_USED")).content = images_used.to_s

        new_elem.add_child(doc.create_element("SIZE")).content = "-1"
        new_elem.add_child(doc.create_element("SIZE_USED")).content = size_used.to_s
    }
end
|
||||
end
|
||||
|
87
src/onedb/fsck/template.rb
Normal file
87
src/onedb/fsck/template.rb
Normal file
@ -0,0 +1,87 @@
|
||||
|
||||
# fsck checks for VM Templates (template_pool table).
module OneDBFsck

    # Scans every row of template_pool and translates the deprecated
    # OS/BOOT format ("hd", "cdrom", "network", "fd", comma separated)
    # into the current device-name format ("disk<ID>", "nic<N>").
    #
    # Templates that cannot be translated automatically (missing DISK,
    # "fd" devices, unparseable values) are only reported; their bodies
    # are left untouched.
    #
    # Fixed bodies are collected in @fixes_template (oid => new XML body)
    # and written back later by fix_template.
    def check_template
        templates_fix = @fixes_template = {}

        @db[:template_pool].each do |row|
            doc = Nokogiri::XML(row[:body], nil, NOKOGIRI_ENCODING) do |c|
                c.default_xml.noblanks
            end

            boot = doc.root.at_xpath("TEMPLATE/OS/BOOT")

            # Nothing to do when BOOT is absent or already uses the new format
            if boot.nil? || boot.text.downcase.match(/fd|hd|cdrom|network/).nil?
                next
            end

            # Note: this code assumes that disks are ordered in the same order as
            # their target, and may break boot order if the target is not left
            # completely to oned.
            # If, for example, the third disk ends with target="vda",
            # boot="hd" should be updated to boot="disk2", but it is not

            devs = []

            hd_i      = 0
            cdrom_i   = 0
            network_i = 0

            error = false

            boot.text.split(",").each do |dev|
                dev.downcase!

                case dev
                when "hd", "cdrom"
                    # Each device type keeps its own running index; the
                    # n-th "hd" maps to the n-th hard-disk DISK, etc.
                    if dev == "hd"
                        index = hd_i
                        hd_i += 1
                    else
                        index = cdrom_i
                        cdrom_i += 1
                    end

                    # get_disk_id is defined elsewhere in the fsck code;
                    # it returns the DISK id matching (type, index) or nil
                    id = get_disk_id(dev, index, doc)
                    if id.nil?
                        log_error("VM Template #{row[:oid]} OS/BOOT contains deprecated format \"#{boot.content}\", but DISK ##{index} of type #{dev} could not be found to fix it automatically", false)
                        error = true
                    end
                    devs.push("disk#{id}")

                when "network"
                    devs.push("nic#{network_i}")
                    network_i += 1

                when "fd"
                    log_error("VM Template #{row[:oid]} OS/BOOT contains deprecated format \"#{boot.content}\", but \"fd\" is not supported anymore and can't be fixed automatically", false)
                    error = true

                else
                    log_error("VM Template #{row[:oid]} OS/BOOT contains deprecated format \"#{boot.content}\", but it can't be parsed to be fixed automatically", false)
                    error = true

                end
            end

            # Any unresolved device leaves the template unchanged
            if error
                next
            end

            new_boot = devs.join(",")

            # FIX: message previously read "is was updated"
            log_error("VM Template #{row[:oid]} OS/BOOT contains deprecated format \"#{boot.content}\", it was updated to #{new_boot}")

            boot.content = new_boot

            templates_fix[row[:oid]] = doc.root.to_s
        end
    end

    # Writes the bodies collected by check_template back to the
    # template_pool table inside a single transaction.
    def fix_template
        @db.transaction do
            @fixes_template.each do |id, body|
                @db[:template_pool].where(oid: id).update(body: body)
            end
        end
    end
end
|
||||
|
97
src/onedb/fsck/user.rb
Normal file
97
src/onedb/fsck/user.rb
Normal file
@ -0,0 +1,97 @@
|
||||
|
||||
# fsck checks for users (user_pool table) and their group membership.
module OneDBFsck

    # Validates every user's primary and secondary groups against
    # group_pool, and checks that the user_pool GID column matches the
    # GID element inside the XML body.
    #
    # Side effects:
    # * @data_user[:group] maps each existing group oid to the list of
    #   user oids found in it (used later by the group checks).
    # * @fixes_user maps user oid => {:body, :gid} for rows that need
    #   fixing; applied by fix_user.
    def check_user
        @data_user = { group: {} }
        group = @data_user[:group]

        # Seed the map with every existing group id
        @db.fetch("SELECT oid FROM group_pool") do |row|
            group[row[:oid]] = []
        end

        @fixes_user = users_fix = {}

        @db.fetch("SELECT oid,body,gid FROM user_pool") do |row|
            doc = Nokogiri::XML(row[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}

            gid = doc.root.at_xpath('GID').text.to_i
            user_gid = gid
            user_gids = Set.new

            # Primary group does not exist: reassign the user to the
            # default "users" group (id 1) and fix GID/GNAME/GROUPS.
            # On a slave DB the error is logged but marked non-repairable.
            if group[gid].nil?
                log_error("User #{row[:oid]} has primary group #{gid}, but it does not exist", !db_version[:is_slave])

                user_gid = 1

                doc.root.xpath('GID').each do |e|
                    e.content = "1"
                end

                doc.root.xpath('GNAME').each do |e|
                    e.content = "users"
                end

                # Replace the dangling group id in the GROUPS list with
                # the new primary group
                doc.root.xpath("GROUPS").each { |e|
                    e.xpath("ID[.=#{gid}]").each{|x| x.remove}

                    e.add_child(doc.create_element("ID")).content = user_gid.to_s
                }

                users_fix[row[:oid]] = {:body => doc.root.to_s, :gid => user_gid}
            end

            # Collect the secondary group ids declared in the body
            doc.root.xpath("GROUPS/ID").each { |e|
                user_gids.add e.text.to_i
            }

            # The primary group must always appear among the secondary
            # groups; add it if missing.
            if !user_gids.include?(user_gid)
                log_error("User #{row[:oid]} does not have his primary group #{user_gid} in the list of secondary groups", !db_version[:is_slave])

                doc.root.xpath("GROUPS").each { |e|
                    e.add_child(doc.create_element("ID")).content = user_gid.to_s
                }

                user_gids.add user_gid.to_i

                users_fix[row[:oid]] = {:body => doc.root.to_s, :gid => user_gid}
            end

            # Drop references to non-existing secondary groups; register
            # the user in every group that does exist.
            user_gids.each do |secondary_gid|
                if group[secondary_gid].nil?
                    log_error("User #{row[:oid]} has secondary group #{secondary_gid}, but it does not exist", !db_version[:is_slave])

                    doc.root.xpath("GROUPS").each { |e|
                        e.xpath("ID[.=#{secondary_gid}]").each{|x| x.remove}
                    }

                    users_fix[row[:oid]] = {:body => doc.root.to_s, :gid => user_gid}
                else
                    group[secondary_gid] << row[:oid]
                end
            end

            # XML body and the GID column of the table must agree
            if gid != row[:gid]
                log_error(
                    "User #{row[:oid]} is in group #{gid}, but the DB "<<
                    "table has GID column #{row[:gid]}", !db_version[:is_slave])

                users_fix[row[:oid]] = {:body => doc.root.to_s, :gid => user_gid}
            end
        end
    end

    # Applies the fixes collected by check_user. On a slave DB nothing is
    # written; the errors must be fixed on the master OpenNebula.
    def fix_user
        users_fix = @fixes_user

        if !db_version[:is_slave]
            @db.transaction do
                users_fix.each do |id, user|
                    @db[:user_pool].where(:oid => id).update(
                        :body => user[:body],
                        :gid => user[:gid])
                end
            end
        elsif !users_fix.empty?
            log_msg("^ User errors need to be fixed in the master OpenNebula")
        end
    end
end
|
202
src/onedb/fsck/vm.rb
Normal file
202
src/onedb/fsck/vm.rb
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
# fsck checks for VMs (vm_pool and history tables).
module OneDBFsck

    # Walks every non-DONE VM (state<>6) and:
    # * records used VNC ports per cluster (@data_vm[:vnc]),
    # * accumulates per-image, per-vnet, per-vrouter and per-host usage
    #   in the shared `counters` structure (defined elsewhere),
    # * fixes a wrong HOSTNAME in the last history record inline,
    # * renames deprecated VMMMAD/TMMAD history elements to
    #   VM_MAD/TM_MAD.
    #
    # Fixes are collected in @fixes_vm (vm body) and @fixes_vm_history
    # ([vid, seq] => history body), applied by fix_vm.
    def check_vm
        vms_fix = @fixes_vm = {}
        @fixes_vm_history = {}

        @data_vm = {}
        cluster_vnc = @data_vm[:vnc] = {}

        # DATA: Aggregate information of the RUNNING vms
        @db.fetch("SELECT oid,body FROM vm_pool WHERE state<>6") do |row|
            vm_doc = nokogiri_doc(row[:body])

            state     = vm_doc.root.at_xpath('STATE').text.to_i
            lcm_state = vm_doc.root.at_xpath('LCM_STATE').text.to_i

            # DATA: VNC ports per cluster
            # CID/PORT may be absent; the rescue nil leaves them unset
            cid = vm_doc.root.at_xpath("HISTORY_RECORDS/HISTORY[last()]/CID").text.to_i rescue nil
            port = vm_doc.root.at_xpath('TEMPLATE/GRAPHICS[translate(TYPE,"vnc","VNC")="VNC"]/PORT').text.to_i rescue nil
            # DATA: TODO: get also spice port

            if cid && port
                cluster_vnc[cid] ||= Set.new
                cluster_vnc[cid] << port
            end

            # DATA: Images used by this VM
            vm_doc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each do |e|
                img_id = e.text.to_i

                if counters[:image][img_id].nil?
                    log_error("VM #{row[:oid]} is using Image #{img_id}, but "<<
                        "it does not exist", false)
                else
                    counters[:image][img_id][:vms].add(row[:oid])
                end
            end

            # DATA: VNets used by this VM
            vm_doc.root.xpath("TEMPLATE/NIC").each do |nic|
                net_id = nil
                nic.xpath("NETWORK_ID").each do |nid|
                    net_id = nid.text.to_i
                end

                if !net_id.nil?
                    if counters[:vnet][net_id].nil?
                        log_error("VM #{row[:oid]} is using VNet #{net_id}, "<<
                            "but it does not exist", false)
                    else
                        mac = nic.at_xpath("MAC").nil? ? nil : nic.at_xpath("MAC").text

                        ar_id_e = nic.at_xpath('AR_ID')

                        # NICs without AR_ID are tracked separately from
                        # AR-based leases; both maps are keyed by the MAC
                        # converted to an integer (mac_s_to_i)
                        if ar_id_e.nil?
                            if !counters[:vnet][net_id][:no_ar_leases][mac_s_to_i(mac)].nil?
                                log_error("VNet #{net_id} has more than one lease with the same MAC address (#{mac}). "<<
                                    "FSCK can't handle this, and consistency is not guaranteed", false)
                            end

                            # DATA: IPs per network no ar
                            counters[:vnet][net_id][:no_ar_leases][mac_s_to_i(mac)] = {
                                :ip         => nic.at_xpath("IP").nil? ? nil : nic.at_xpath("IP").text,
                                :ip6_global => nic.at_xpath("IP6_GLOBAL").nil? ? nil : nic.at_xpath("IP6_GLOBAL").text,
                                :ip6_link   => nic.at_xpath("IP6_LINK").nil? ? nil : nic.at_xpath("IP6_LINK").text,
                                :ip6_ula    => nic.at_xpath("IP6_ULA").nil? ? nil : nic.at_xpath("IP6_ULA").text,
                                :mac        => mac,
                                :vm         => row[:oid],
                                :vnet       => nil,
                                :vrouter    => nil
                            }
                        else
                            ar_id = ar_id_e.text.to_i

                            if counters[:vnet][net_id][:ar_leases][ar_id].nil?
                                log_error("VM #{row[:oid]} is using VNet #{net_id}, AR #{ar_id}, "<<
                                    "but the AR does not exist", false)
                                # DATA: why these are not added to counters?
                            else
                                # DATA: IPs per network with ar
                                counters[:vnet][net_id][:ar_leases][ar_id][mac_s_to_i(mac)] = {
                                    :ip         => nic.at_xpath("IP").nil? ? nil : nic.at_xpath("IP").text,
                                    :ip6_global => nic.at_xpath("IP6_GLOBAL").nil? ? nil : nic.at_xpath("IP6_GLOBAL").text,
                                    :ip6_link   => nic.at_xpath("IP6_LINK").nil? ? nil : nic.at_xpath("IP6_LINK").text,
                                    :ip6_ula    => nic.at_xpath("IP6_ULA").nil? ? nil : nic.at_xpath("IP6_ULA").text,
                                    :mac        => mac,
                                    :vm         => row[:oid],
                                    :vnet       => nil,
                                    :vrouter    => nil
                                }
                            end
                        end
                    end
                end
            end

            # See if it's part of a Virtual Router
            vrouter_e = vm_doc.root.at_xpath("TEMPLATE/VROUTER_ID")

            # DATA: add vrouter counters
            if !vrouter_e.nil?
                vr_id = vrouter_e.text.to_i
                counters_vrouter = counters[:vrouter][vr_id]

                if counters_vrouter.nil?
                    log_error("VM #{row[:oid]} is part of VRouter #{vr_id}, but "<<
                        "it does not exist", false)
                else
                    counters_vrouter[:vms].add(row[:oid])
                end
            end

            # DATA: Host resources

            # Only states that add to Host resources consumption are
            # ACTIVE, SUSPENDED, POWEROFF
            next if !([3,5,8].include? state)

            # DATA: Get memory (integer)
            memory = vm_doc.root.at_xpath("TEMPLATE/MEMORY").text.to_i

            # DATA: Get CPU (float)
            cpu = vm_doc.root.at_xpath("TEMPLATE/CPU").text.to_f

            # DATA: Get hostid, hostname
            hid = -1
            vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HID").each { |e|
                hid = e.text.to_i
            }

            hostname = ""
            vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME").each { |e|
                hostname = e.text
            }

            counters_host = counters[:host][hid]

            if counters_host.nil?
                log_error("VM #{row[:oid]} is using Host #{hid}, "<<
                    "but it does not exist", false)
            else
                # DATA: FIX: hostname is wrong, fix inline
                if counters_host[:name] != hostname
                    log_error("VM #{row[:oid]} has a wrong hostname for "<<
                        "Host #{hid}, #{hostname}. It will be changed to "<<
                        "#{counters_host[:name]}")

                    vm_doc.root.xpath(
                        "HISTORY_RECORDS/HISTORY[last()]/HOSTNAME").each { |e|
                        e.content = counters_host[:name]
                    }

                    vms_fix[row[:oid]] = vm_doc.root.to_s
                end

                # DATA: add resources to host counters
                counters_host[:memory] += memory
                counters_host[:cpu]    += cpu
                counters_host[:rvms].add(row[:oid])
            end

            # DATA: search history for VMMMAD and TMMAD to translate
            @db.fetch("SELECT * FROM history WHERE vid=#{row[:oid]}") do |hrow|
                # hdoc = Nokogiri::XML(hrow[:body],nil,NOKOGIRI_ENCODING){|c| c.default_xml.noblanks}
                hdoc = nokogiri_doc(hrow[:body])

                found = false

                # Rename VMMMAD -> VM_MAD and TMMAD -> TM_MAD
                hdoc.root.xpath("VMMMAD").each {|e|
                    e.name = "VM_MAD"
                    found = true
                }

                hdoc.root.xpath("TMMAD").each {|e|
                    e.name = "TM_MAD"
                    found = true
                }

                # DATA: translate VMMMAD and TMMAD to VM_MAD and TM_MAD
                if found
                    index = [hrow[:vid], hrow[:seq]]
                    @fixes_vm_history[index] = hdoc.root.to_s
                end
            end
        end
    end

    # Applies the VM body and history fixes collected by check_vm in a
    # single transaction.
    def fix_vm
        # DATA: FIX: do vm_pool fixes
        @db.transaction do
            @fixes_vm.each do |id, body|
                @db[:vm_pool].where(:oid => id).update(:body => body)
            end

            @fixes_vm_history.each do |index, body|
                vid, seq = index
                @db[:history].where(vid: vid, seq: seq).update(body: body)
            end
        end
    end
end
|
120
src/onedb/fsck/vrouter.rb
Normal file
120
src/onedb/fsck/vrouter.rb
Normal file
@ -0,0 +1,120 @@
|
||||
|
||||
# fsck checks for Virtual Routers (vrouter_pool table).
module OneDBFsck

    # Walks every Virtual Router and:
    # * registers its floating-IP NIC leases in the shared `counters`
    #   vnet lease maps (same bookkeeping as the VM NIC checks),
    # * rebuilds the router's VMS id list from counters[:vrouter],
    #   which check_vm filled with the VMs that reference this router.
    #
    # Routers whose VMS list disagrees with the counters are collected
    # in @fixes_vrouter (oid => new body), applied by fix_vrouter.
    def check_vrouter
        vrouters_fix = @fixes_vrouter = {}

        # DATA: Aggregate information of the RUNNING vms
        @db.fetch("SELECT oid,body FROM vrouter_pool") do |row|
            vrouter_doc = nokogiri_doc(row[:body])

            # DATA: VNets used by this Virtual Router
            vrouter_doc.root.xpath("TEMPLATE/NIC").each do |nic|
                net_id = nil
                nic.xpath("NETWORK_ID").each do |nid|
                    net_id = nid.text.to_i
                end

                floating = false

                nic.xpath("FLOATING_IP").each do |floating_e|
                    floating = (floating_e.text.upcase == "YES")
                end

                # Only floating-IP NICs hold a lease owned by the router
                # itself (non-floating leases belong to the router's VMs)
                if !net_id.nil? && floating
                    if counters[:vnet][net_id].nil?
                        log_error("VRouter #{row[:oid]} is using VNet #{net_id}, "<<
                            "but it does not exist", false)
                    else
                        # DATA: this part is copied from "VNets used by this VM"
                        mac = nic.at_xpath("MAC").nil? ? nil : nic.at_xpath("MAC").text

                        ar_id_e = nic.at_xpath('AR_ID')

                        if ar_id_e.nil?
                            if !counters[:vnet][net_id][:no_ar_leases][mac_s_to_i(mac)].nil?
                                log_error("VNet #{net_id} has more than one lease with the same MAC address (#{mac}). "<<
                                    "FSCK can't handle this, and consistency is not guaranteed", false)
                            end

                            counters[:vnet][net_id][:no_ar_leases][mac_s_to_i(mac)] = {
                                :ip         => nic.at_xpath("IP").nil? ? nil : nic.at_xpath("IP").text,
                                :ip6_global => nic.at_xpath("IP6_GLOBAL").nil? ? nil : nic.at_xpath("IP6_GLOBAL").text,
                                :ip6_link   => nic.at_xpath("IP6_LINK").nil? ? nil : nic.at_xpath("IP6_LINK").text,
                                :ip6_ula    => nic.at_xpath("IP6_ULA").nil? ? nil : nic.at_xpath("IP6_ULA").text,
                                :mac        => mac,
                                :vm         => nil,
                                :vnet       => nil,
                                :vrouter    => row[:oid],
                            }
                        else
                            ar_id = ar_id_e.text.to_i

                            if counters[:vnet][net_id][:ar_leases][ar_id].nil?
                                log_error("VRouter #{row[:oid]} is using VNet #{net_id}, AR #{ar_id}, "<<
                                    "but the AR does not exist", false)
                            else
                                counters[:vnet][net_id][:ar_leases][ar_id][mac_s_to_i(mac)] = {
                                    :ip         => nic.at_xpath("IP").nil? ? nil : nic.at_xpath("IP").text,
                                    :ip6_global => nic.at_xpath("IP6_GLOBAL").nil? ? nil : nic.at_xpath("IP6_GLOBAL").text,
                                    :ip6_link   => nic.at_xpath("IP6_LINK").nil? ? nil : nic.at_xpath("IP6_LINK").text,
                                    :ip6_ula    => nic.at_xpath("IP6_ULA").nil? ? nil : nic.at_xpath("IP6_ULA").text,
                                    :mac        => mac,
                                    :vm         => nil,
                                    :vnet       => nil,
                                    :vrouter    => row[:oid],
                                }
                            end
                        end
                    end
                end
            end

            # DATA: re-do list of VM IDs per vrouter
            error = false

            counters_vrouter = counters[:vrouter][row[:oid]]

            # Detach the old VMS element and build a fresh one from the
            # counters; ids remaining in the old element afterwards are
            # stale entries.
            vms_elem = vrouter_doc.root.at_xpath("VMS").remove

            vms_new_elem = vrouter_doc.create_element("VMS")
            vrouter_doc.root.add_child(vms_new_elem)

            counters_vrouter[:vms].each do |id|
                id_elem = vms_elem.at_xpath("ID[.=#{id}]")

                if id_elem.nil?
                    log_error(
                        "VM #{id} is missing from VRouter #{row[:oid]} VM id list")

                    error = true
                else
                    id_elem.remove
                end

                vms_new_elem.add_child(vrouter_doc.create_element("ID")).content = id.to_s
            end

            vms_elem.xpath("ID").each do |id_elem|
                log_error(
                    "VM #{id_elem.text} is in VRouter #{row[:oid]} VM id list, "<<
                    "but it should not")

                error = true
            end

            if (error)
                vrouters_fix[row[:oid]] = vrouter_doc.root.to_s
            end
        end
    end

    # Writes the regenerated Virtual Router bodies back to vrouter_pool.
    def fix_vrouter
        # DATA: FIX: update vrouter_pool with regenerated documents (ids)
        @db.transaction do
            @fixes_vrouter.each do |id, body|
                @db[:vrouter_pool].where(oid: id).update(body: body)
            end
        end
    end
end
|
||||
|
Loading…
x
Reference in New Issue
Block a user