extra/devel-vagrant: accept gluster src location from user
Currently the GlusterFS source location is hard-coded in the Vagrantfile. Make the source location user-configurable and also fix a minor issue with peer probe on a single-node setup.

Change-Id: I7057a97d7372477ddbf01fbc8db949923dfd86e8
BUG: 1336354
Signed-off-by: Rajesh Joseph <rjoseph@redhat.com>
Reviewed-on: http://review.gluster.org/14354
Smoke: Gluster Build System <jenkins@build.gluster.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
parent 58615482ed
commit 57185759cc
extras/devel-tools/devel-vagrant/Vagrantfile | 21 changed lines (vendored)
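The core of the change is an interactive prompt during "vagrant up" that replaces the hard-coded source location. A minimal Ruby sketch of the prompt-with-default idiom the Vagrantfile now uses (illustration only; variable names mirror the diff below):

    source_path = "/source/glusterfs"              # shipped default
    print "Enter GlusterFS source location? Default: \"#{source_path}\" : "
    tmploc = $stdin.gets.to_s.strip                # read the reply, tolerate EOF
    source_path = tmploc unless tmploc.empty?      # keep the default on an empty reply
    puts "Using GlusterFS source from #{source_path}"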
@@ -13,6 +13,8 @@ node_count = 0
 disk_count = -1
 node_name = "Node"
 ipbase="192.168.99."
+source_path = "/source/glusterfs"
+target_path = "/mnt/src"
 
 if ARGV[0] == "up"
     environment = open('vagrant_env.conf', 'w')
@@ -40,9 +42,16 @@ if ARGV[0] == "up"
         end
     end
 
+    print "\e[1;37mEnter GlusterFS source location? Default: \"#{source_path}\" : \e[32m"
+    tmploc = $stdin.gets.strip.to_s
+    if tmploc != ""
+        source_path = "#{tmploc}"
+    end
+
     environment.puts("# BEWARE: Do NOT modify ANY settings in here or your vagrant environment will be messed up")
     environment.puts(node_count.to_s)
     environment.puts(disk_count.to_s)
+    environment.puts(source_path)
 
     print "\e[32m\nOK I will provision #{node_count} VMs for you and each one will have #{disk_count} disks for bricks\e[37m\n\n"
     system "sleep 1"
@@ -52,6 +61,7 @@ else # So that we destroy and can connect to all VMs...
     environment.readline # Skip the comment on top
     node_count = environment.readline.to_i
     disk_count = environment.readline.to_i
+    source_path = environment.readline.gsub(/\s+/, "")
 
     if ARGV[0] != "ssh-config"
         puts "Detected settings from previous vagrant up:"
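The two hunks above form a round trip: the path chosen at "vagrant up" time is written as a fourth line into vagrant_env.conf, and every other vagrant command reads the values back in the same order, so "vagrant destroy" or "vagrant ssh" keep pointing at the same tree. A rough, self-contained sketch of that round trip (example values; in the Vagrantfile the file is written and read by the code shown above):

    node_count, disk_count, source_path = 2, 2, "/source/glusterfs"   # example values

    # written on "vagrant up"
    File.open('vagrant_env.conf', 'w') do |env|
      env.puts("# BEWARE: Do NOT modify ANY settings in here or your vagrant environment will be messed up")
      env.puts(node_count.to_s)
      env.puts(disk_count.to_s)
      env.puts(source_path)
    end

    # read back on any other vagrant command
    File.open('vagrant_env.conf', 'r') do |env|
      env.readline                                   # skip the comment on top
      node_count  = env.readline.to_i
      disk_count  = env.readline.to_i
      source_path = env.readline.gsub(/\s+/, "")     # drop the trailing newline
    end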
@@ -85,13 +95,17 @@ def attachDisks(numDisk, provider)
     $devnamecreated = true
 end
 
 
+$ansivar["src_path"].push "#{source_path}"
+$ansivar["trg_path"].push "#{target_path}"
+
 groups = Hash.new{ |hash,key| hash[key] = [] }
 
 groups["origin"].push "#{node_name}1"
 groups["all"].push "#{node_name}1"
 
 (2..node_count).each do |num|
     groups["group1"].push "#{node_name}#{num}"
+    $ansivar["peer_nodes"].push "#{node_name}#{num}"
     groups["all"].push "#{node_name}#{num}"
 end
@@ -100,7 +114,6 @@ hostsFile = "\n"
     hostsFile += "#{ipbase}#{( 100 + num).to_s} #{node_name}#{num.to_s}\n"
 end
 
-
 Vagrant.configure("2") do |config|
     (1..node_count).each do |num|
         config.vm.define "#{node_name}#{num}" do |node|
@@ -110,7 +123,7 @@ Vagrant.configure("2") do |config|
             node.vm.box_url = box_url
             node.vm.hostname = "#{node_name}#{num}"
             node.ssh.insert_key = false
-            node.vm.synced_folder "/work/source", "/work/source", type: "nfs"
+            node.vm.synced_folder "#{source_path}", "#{target_path}", type: "nfs"
 
             # Define basic config for VM, memory, cpu, storage pool
             node.vm.provider "libvirt" do |virt|
@@ -124,7 +137,7 @@ Vagrant.configure("2") do |config|
             node.vm.post_up_message = "\e[37mBuilding of this VM is finished \n"
                 "You can access it now with: \n"
                 "vagrant ssh #{node_name}#{num.to_s}\n\n"
-                "/work/source directory in VM #{node_name}#{num.to_s}"
+                "#{target_path} directory in VM #{node_name}#{num.to_s}"
                 "is synced with Host machine. \nSo any changes done in this"
                 "directory will be reflected in the host machine as well\n"
                 "Beware of this when you delete content from this directory\e[32m"
@@ -1,6 +1,5 @@
 ---
-- name: Gluster peer probe
+- name: gluster peer probe
   shell: gluster peer probe {{ item }}
-  with_items: groups ['group1']
-
+  with_items: "{{ peer_nodes | default([]) }}"
 
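This hunk is the "peer probe with single node" fix from the commit message: peer_nodes is filled by the Vagrantfile only for Node2..NodeN, so on a one-node setup the list defaults to empty and the probe task is simply skipped instead of tripping over groups['group1']. As a standalone sketch, the resulting task (old-style with_items, as used throughout these playbooks):

    ---
    - name: gluster peer probe
      shell: gluster peer probe {{ item }}
      # empty on a single-node setup, so the task iterates over nothing
      with_items: "{{ peer_nodes | default([]) }}"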
@@ -1,9 +1,10 @@
 ---
 - name: autogen.sh
-  shell: chdir=/work/source/glusterfs ./autogen.sh
+  shell: chdir={{ item }} ./autogen.sh
+  with_items: "{{ trg_path }}"
 
 - name: configure
-  shell: chdir=/work/source/glusterfs CFLAGS="-g -O0 -Werror -Wall -Wno-error=cpp -Wno-error=maybe-uninitialized" \
+  shell: chdir={{ item }} CFLAGS="-g -O0 -Werror -Wall -Wno-error=cpp -Wno-error=maybe-uninitialized" \
          ./configure \
          --prefix=/usr \
          --exec-prefix=/usr \
@@ -20,7 +21,9 @@
          --infodir=/usr/share/info \
          --libdir=/usr/lib64 \
          --enable-debug
+  with_items: "{{ trg_path }}"
 
 - name: make install
-  shell: chdir=/work/source/glusterfs make install
+  shell: chdir={{ item }} make install
+  with_items: "{{ trg_path }}"
 
@@ -1,6 +1,7 @@
 ---
 - hosts: all
-  sudo: true
+  become: yes
+  become_method: sudo
   roles:
     - install-pkgs
     - prepare-brick
@@ -8,13 +9,15 @@
     - iptables
 
 - hosts: all
-  sudo: true
+  become: yes
+  become_method: sudo
   serial: 1
   roles:
     - compile-gluster
     - service
 
 - hosts: origin
-  sudo: true
+  become: yes
+  become_method: sudo
   roles:
     - cluster
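The playbook hunks above also replace the deprecated sudo: true directive with the explicit privilege-escalation pair used by newer Ansible releases; the plays still run everything through sudo. Shape of each play after the change, sketched here with the origin play:

    ---
    - hosts: origin
      become: yes            # escalate privileges ...
      become_method: sudo    # ... via sudo, exactly as before
      roles:
        - cluster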