devel-tools: vagrant setup for creating devel VMs

Created a vagrant setup for Gluster developers. The Vagrantfile
is modified from the work done by Chris (cblum@redhat.com).
The Ansible structure is taken from the test vagrant infra initially
posted by rtalur.

This vagrant setup can create one or more VMs (using libvirt),
create thinly provisioned LVs, format them with XFS, and mount them.
It will also compile the source from the host machine (/work/source),
install gluster, and peer probe the nodes.
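Typical usage (a sketch, assuming vagrant with the vagrant-libvirt plugin
is installed and the commands are run from the directory holding this
Vagrantfile):

    vagrant up         # prompts for VM count and per-node disk count
    vagrant ssh Node1  # gluster is already compiled, installed and peered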

Change-Id: Ie984f661bb5dc9cfc4059643389d3c80b2cae21d
BUG: 1327976
Signed-off-by: Rajesh Joseph <rjoseph@redhat.com>
Reviewed-on: http://review.gluster.org/13734
Smoke: Gluster Build System <jenkins@build.gluster.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
Tested-by: Raghavendra Talur <rtalur@redhat.com>
Rajesh Joseph 2016-03-15 12:47:24 +05:30 committed by Jeff Darcy
parent f93761c547
commit a9b78f1ebe
11 changed files with 340 additions and 0 deletions

Vagrantfile
@@ -0,0 +1,152 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Author: Rajesh Joseph (rjoseph@redhat.com)
# Author: Christopher Blum (cblum@redhat.com)
#

# Variables
box_name = "gluster-dev-fedora"
box_url = "http://download.gluster.org/pub/gluster/glusterfs/vagrant/gluster-dev-fedora/boxes/gluster-dev-fedora.json"

node_count = 0
disk_count = -1
node_name = "Node"
ipbase = "192.168.99."

if ARGV[0] == "up"
  environment = open('vagrant_env.conf', 'w')

  print "\n\e[1;37mEnter Node (or VM) Count? Default: 1 \e[32m"
  while node_count < 1 or node_count > 99
    node_count = $stdin.gets.strip.to_i
    if node_count == 0 # The user pressed enter without input or we cannot parse the input to a number
      node_count = 1
    elsif node_count < 1
      print "\e[31mWe need at least 1 VM ;) Try again \e[32m"
    elsif node_count > 99
      print "\e[31mWe don't support more than 99 VMs - Try again \e[32m"
    end
  end

  print "\e[1;37mEnter per Node Disk (Brick) Count? Default: 2 \e[32m"
  while disk_count < 1
    disk_count = $stdin.gets.strip.to_i
    if disk_count == 0 # The user pressed enter without input or we cannot parse the input to a number
      disk_count = 2
    elsif disk_count < 1
      print "\e[31mWe need at least 1 disk ;) Try again \e[32m"
    end
  end

  environment.puts("# BEWARE: Do NOT modify ANY settings in here or your vagrant environment will be messed up")
  environment.puts(node_count.to_s)
  environment.puts(disk_count.to_s)
  print "\e[32m\nOK I will provision #{node_count} VMs for you and each one will have #{disk_count} disks for bricks\e[37m\n\n"
  system "sleep 1"
else # So that we destroy and can connect to all VMs...
  environment = open('vagrant_env.conf', 'r')
  environment.readline # Skip the comment on top
  node_count = environment.readline.to_i
  disk_count = environment.readline.to_i

  if ARGV[0] != "ssh-config"
    puts "Detected settings from previous vagrant up:"
    puts "  We deployed #{node_count} VMs with each #{disk_count} disks"
    puts ""
  end
end
environment.close

$ansivar = Hash.new { |hash, key| hash[key] = [] }
$devnamecreated = false

#
# Function to create and attach disks
# (supports up to 13 disks per node: vdb..vdn)
#
def attachDisks(numDisk, provider)
  suffix = "bcdefghijklmn".split("")
  for i in 1..numDisk.to_i
    devname = "vd" + suffix[i - 1].to_s
    if $devnamecreated == false
      $ansivar["device"].push "#{devname}"
    end
    provider.storage :file,
      :size   => '1G',
      :device => devname,
      :type   => "qcow2",
      :bus    => "virtio",
      :cache  => "default"
  end
  $devnamecreated = true
end

groups = Hash.new { |hash, key| hash[key] = [] }
groups["origin"].push "#{node_name}1"
groups["all"].push "#{node_name}1"
(2..node_count).each do |num|
  groups["group1"].push "#{node_name}#{num}"
  groups["all"].push "#{node_name}#{num}"
end

hostsFile = "\n"
(1..node_count).each do |num|
  hostsFile += "#{ipbase}#{(100 + num).to_s} #{node_name}#{num.to_s}\n"
end

Vagrant.configure("2") do |config|
  (1..node_count).each do |num|
    config.vm.define "#{node_name}#{num}" do |node|
      ip_addr = "#{ipbase}#{(100 + num).to_s}"
      node.vm.network "private_network", ip: "#{ip_addr}"
      node.vm.box = box_name
      node.vm.box_url = box_url
      node.vm.hostname = "#{node_name}#{num}"
      node.ssh.insert_key = false
      node.vm.synced_folder "/work/source", "/work/source", type: "nfs"

      # Define basic config for VM: memory, cpu, storage pool
      node.vm.provider "libvirt" do |virt|
        virt.storage_pool_name = "default"
        virt.memory = 1024
        virt.cpus = 1
        attachDisks(disk_count, virt)
      end

      # Adjacent string literals need explicit continuation to concatenate
      node.vm.post_up_message = "\e[37mBuilding of this VM is finished \n" \
        "You can access it now with: \n" \
        "vagrant ssh #{node_name}#{num.to_s}\n\n" \
        "/work/source directory in VM #{node_name}#{num.to_s} " \
        "is synced with the host machine. \nSo any changes done in this " \
        "directory will be reflected in the host machine as well.\n" \
        "Beware of this when you delete content from this directory\e[32m"

      node.vm.provision :shell, path: "bootstrap.sh"
      node.vm.provision "shell", inline: <<-SHELL
        echo '#{hostsFile}' | sudo tee -a /etc/hosts
      SHELL

      if num == node_count
        # Let's provision
        node.vm.provision "ansible" do |setup|
          setup.verbose = "v"
          setup.playbook = "ansible/setup.yml"
          setup.limit = "all"
          setup.sudo = "true"
          setup.groups = groups
          setup.extra_vars = $ansivar
        end
      end
    end
  end
end
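As a quick sanity check after vagrant up, the attached brick disks should
show up inside each VM under the names chosen by attachDisks above; with
the default of two disks, expect vdb and vdc at 1G each:

    vagrant ssh Node1 -c "lsblk -d -o NAME,SIZE"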

ansible/roles/cluster/tasks/main.yml
@@ -0,0 +1,6 @@
---
- name: Gluster peer probe
  shell: gluster peer probe {{ item }}
  with_items: "{{ groups['group1'] }}"
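A successful probe can be verified from the origin node; hostnames follow
the Node<N> convention from the Vagrantfile:

    vagrant ssh Node1 -c "sudo gluster peer status"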

ansible/roles/compile-gluster/tasks/main.yml
@@ -0,0 +1,26 @@
---
- name: autogen.sh
  shell: chdir=/work/source/glusterfs ./autogen.sh

- name: configure
  shell: chdir=/work/source/glusterfs CFLAGS="-g -O0 -Werror -Wall -Wno-error=cpp -Wno-error=maybe-uninitialized" \
         ./configure \
             --prefix=/usr \
             --exec-prefix=/usr \
             --bindir=/usr/bin \
             --sbindir=/usr/sbin \
             --sysconfdir=/etc \
             --datadir=/usr/share \
             --includedir=/usr/include \
             --libdir=/usr/lib64 \
             --libexecdir=/usr/libexec \
             --localstatedir=/var \
             --sharedstatedir=/var/lib \
             --mandir=/usr/share/man \
             --infodir=/usr/share/info \
             --enable-debug

- name: make install
  shell: chdir=/work/source/glusterfs make install
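After editing the synced source on the host, the build can be repeated by
hand inside any VM (a sketch; configure has already been run by this role):

    vagrant ssh Node1
    cd /work/source/glusterfs
    make && sudo make install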

ansible/roles/install-pkgs/tasks/main.yml
@@ -0,0 +1,72 @@
---
- name: install deltarpm
  dnf: name=deltarpm state=present

- name: update system
  shell: dnf update -y

- name: install other packages
  dnf: name={{ item }} state=present
  with_items:
    - attr
    - autoconf
    - automake
    - bison
    - cifs-utils
    - cscope
    - ctags
    - dbench
    - dos2unix
    - e2fsprogs
    - findutils
    - flex
    - fuse-devel
    - fuse-libs
    - gcc
    - gdb
    - git
    - glib2-devel
    - hostname
    - libacl-devel
    - libaio-devel
    - libattr-devel
    - libibverbs-devel
    - librdmacm-devel
    - libtool
    - libxml2-devel
    - lvm2-devel
    - make
    - man-db
    - mock
    - net-tools
    - nfs-utils
    - openssh-server
    - openssl-devel
    - perl-Test-Harness
    - pkgconfig
    - procps-ng
    - psmisc
    - python-devel
    - python-eventlet
    - python-netifaces
    - python-paste-deploy
    - python-setuptools
    - python-simplejson
    - python-sphinx
    - python-webob
    - pyxattr
    - readline-devel
    - rpm-build
    - screen
    - strace
    - supervisor
    - systemtap-sdt-devel
    - sqlite-devel
    - samba*
    - userspace-rcu-devel
    - vim
    - wget
    - which
    - xfsprogs
    - yajl-devel

ansible/roles/iptables/tasks/main.yml
@@ -0,0 +1,3 @@
---
- name: flush iptables rules, specific rules need to be added later
  shell: iptables -F
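Flushing empties the default chains rather than stopping the service; a
quick check from inside a VM:

    sudo iptables -L -n    # chains should list no rules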

ansible/roles/prepare-brick/tasks/main.yml
@@ -0,0 +1,30 @@
---
- name: Create physical volume
  shell: pvcreate /dev/{{ item }}
  with_items: "{{ device }}"

- name: Create volume group
  shell: vgcreate vg{{ item }} /dev/{{ item }}
  with_items: "{{ device }}"

- name: Create thin pool
  shell: lvcreate -L 950M -T vg{{ item }}/thinpool
  with_items: "{{ device }}"

- name: Create thin volume
  shell: lvcreate -V900M -T vg{{ item }}/thinpool -n thinp1
  with_items: "{{ device }}"

- name: Format backend
  filesystem: fstype=xfs dev=/dev/vg{{ item }}/thinp1
  with_items: "{{ device }}"

- name: Create mount directory
  file: path=/bricks/br{{ item }} state=directory recurse=yes
  with_items: "{{ device }}"

- name: Add entry to fstab and mount
  mount: name=/bricks/br{{ item }} src=/dev/vg{{ item }}/thinp1 fstype=xfs state=mounted
  with_items: "{{ device }}"
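The resulting layout can be inspected from inside a VM: each data disk
(e.g. vdb) carries a vgvdb volume group holding a 950M thinpool and a 900M
thinp1 LV, mounted as an XFS brick:

    sudo lvs
    df -h /bricks/br*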

ansible/roles/selinux/tasks/main.yml
@@ -0,0 +1,3 @@
---
- name: Allow Samba with gfapi to connect to ports other than the well-known smb ports
  seboolean: name=samba_load_libgfapi state=yes persistent=yes
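The boolean can be confirmed inside a VM with getsebool:

    sudo getsebool samba_load_libgfapi    # expect: samba_load_libgfapi --> on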

ansible/roles/service/tasks/main.yml
@@ -0,0 +1,21 @@
---
- name: disable kernel nfs
  service: name=nfs-server enabled=no

- name: stop kernel nfs
  service: name=nfs-server state=stopped

- name: enable rpcbind
  service: name=rpcbind enabled=yes

- name: start rpcbind
  service: name=rpcbind state=started

- name: enable glusterd
  service: name=glusterd enabled=yes

- name: start glusterd
  service: name=glusterd state=started
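Service state is easy to spot-check inside a VM after provisioning:

    systemctl is-enabled glusterd && systemctl is-active glusterd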

ansible/setup.yml
@@ -0,0 +1,20 @@
---
- hosts: all
  sudo: true
  roles:
    - install-pkgs
    - prepare-brick
    - selinux
    - iptables

- hosts: all
  sudo: true
  serial: 1
  roles:
    - compile-gluster
    - service

- hosts: origin
  sudo: true
  roles:
    - cluster
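The playbook can be re-run without recreating the VMs, either with
"vagrant provision" or directly against the inventory the ansible
provisioner generates (path per Vagrant's ansible provisioner; a sketch):

    ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory ansible/setup.yml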

bootstrap.sh
@@ -0,0 +1,3 @@
# nfs-utils is for the NFS synced folder; the python packages are needed by the Ansible modules
dnf install -y nfs-utils
dnf install -y vim
dnf install -y python2 python2-dnf libselinux-python libsemanage-python

@@ -0,0 +1,4 @@
#!/bin/sh
vagrant up --no-provision "$@"
vagrant provision
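Note that bringing the VMs up with --no-provision and provisioning in a
second step is deliberate: the ansible provisioner is attached only to the
last node (with limit = "all", see the Vagrantfile), so every VM must
already be running when the playbook finally fires.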