Lab setup notes from a Pluralsight course on Linux load balancing and high availability.
Contents:
LVS and Load Balancing
LVS and Keepalived
LVS and ldirectord
HAProxy Load Balancing
Pacemaker Installation
Pacemaker Installation: Ubuntu 14.04
Pacemaker Command Line Administration
DRBD
OCFS2
GFS2
GFS2-Pacemaker Integration

LVS and Load Balancing
Environment:
Two LXC containers running Ubuntu 14.04 with Apache as webservers and one Ubuntu 14.04 LXC container as the LVS server - all within a single network (10.0.3.0/24).
The kernel module load (modprobe) takes place on the host of the LVS container.
sudo modprobe ip_vs
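A quick check that the module is loaded, and an optional way to make it persist across reboots:
lsmod | grep ip_vs
echo "ip_vs" | sudo tee -a /etc/modules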
ssh ubuntu@10.0.3.102
cat /proc/net/ip_vs
sudo apt-get install ipvsadm
sudo nano /etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.ip_nonlocal_bind=1
sudo sysctl -p
sudo nano /etc/default/ipvsadm
AUTO="true"
DAEMON="master"
sudo service ipvsadm start
sudo ipvsadm -l
sudo ipvsadm -C
sudo ipvsadm -A -t 192.168.0.111:80 -s wlc
sudo ipvsadm -a -t 192.168.0.111:80 -r 10.0.3.77:80 -m
sudo ipvsadm -a -t 192.168.0.111:80 -r 10.0.3.19:80 -m
sudo ipvsadm -l
sudo ipvsadm-save > ipvsadm.conf
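The saved table can later be reloaded with the companion tool:
sudo ipvsadm-restore < ipvsadm.conf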
LVS and Keepalived
Environment:
Two LXC containers running Ubuntu 14.04 as Keepalived servers - all within a single network (10.0.3.0/24).
sudo apt-get install build-essential libssl-dev
wget http://www.keepalived.org/software/keepalived-1.2.19.tar.gz
tar xzvf keepalived-1.2.19.tar.gz
cd keepalived-1.2.19
sudo ./configure
sudo make
sudo make install
sudo nano /etc/init/keepalived.conf
description "keepalived"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
exec /usr/local/sbin/keepalived --dont-fork
sudo mkdir /etc/keepalived
sudo nano /etc/keepalived/keepalived.conf
sudo nano /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1
sudo sysctl -p
sudo service keepalived start
sudo service keepalived stop
/etc/keepalived/keepalived.conf
! Configuration File for keepalived
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass <password>   # place a secure password here
    }
    virtual_ipaddress {
        10.0.3.243
    }
}
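The second container gets a near-identical configuration; a minimal sketch, assuming the same VRID and password, with state BACKUP and a lower priority so it claims the VIP only when the master fails:
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass <password>   # same password as on the master
    }
    virtual_ipaddress {
        10.0.3.243
    }
}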
LVS and ldirectord
Environment:
One LXC container running Ubuntu 16.04 as the ldirectord server plus two webserver containers - all within a single network (10.0.3.0/24).
sudo apt install ldirectord
less /etc/init.d/ldirectord
cat /etc/default/ldirectord
sudo nano /etc/default/ipvsadm
sudo systemctl start ipvsadm
sudo nano /etc/sysctl.conf
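The same settings used in the LVS section apply here:
net.ipv4.ip_forward=1
net.ipv4.ip_nonlocal_bind=1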
sudo sysctl -p
sudo nano /etc/ha.d/conf/ldirectord.cf
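The course notes do not reproduce the file; a minimal ldirectord.cf sketch for two NAT real servers (the VIP and real-server addresses are placeholders to adapt to your lab):
checktimeout=10
checkinterval=2
autoreload=yes
quiescent=no

virtual=10.0.3.217:80
        real=10.0.3.190:80 masq
        real=10.0.3.142:80 masq
        service=http
        scheduler=wlc
        protocol=tcp
        checktype=connect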
sudo systemctl start ldirectord
sudo ipvsadm -L -n
HAProxy Load Balancing
Environment:
One LXC container running Ubuntu 16.04 as the HAProxy server plus two webserver containers - all within a single network (10.0.3.0/24).
sudo apt-get install haproxy
sudo systemctl start haproxy
sudo nano /etc/haproxy/haproxy.cfg
frontend mylistener
    bind *:80
    mode http
    default_backend nodes

backend nodes
    mode http
    balance roundrobin
    option forwardfor
    http-request set-header X-Forwarded-Port %[dst_port]
    http-request add-header X-Forwarded-Proto https if { ssl_fc }
    option httpchk HEAD / HTTP/1.1\r\nHost:localhost
    server server01 10.0.3.190:80 check
    server server02 10.0.3.142:80 check
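Before restarting, the configuration can be syntax-checked:
sudo haproxy -c -f /etc/haproxy/haproxy.cfg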
sudo systemctl restart haproxy
sudo systemctl status haproxy
curl 10.0.3.217
Pacemaker Installation
Environment:
Two VirtualBox VMs running CentOS 7 on a single network (10.0.3.0/24). If necessary, open TCP ports 2224, 3121, and 21064, and UDP port 5405 using:
firewall-cmd --zone=public --add-port=2224/tcp --permanent
firewall-cmd --reload
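Only port 2224 is shown above; the remaining ports from the list can be opened the same way, then reload again:
firewall-cmd --zone=public --add-port=3121/tcp --permanent
firewall-cmd --zone=public --add-port=21064/tcp --permanent
firewall-cmd --zone=public --add-port=5405/udp --permanent
firewall-cmd --reload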
sudo nano /etc/httpd/conf.d/status.conf
<Location /server-status>
    SetHandler server-status
    Order Deny,Allow
    Deny from all
    Allow from 127.0.0.1
</Location>
systemctl enable httpd.service
yum install pacemaker pcs
systemctl start pcsd
systemctl enable pcsd
passwd hacluster
nano /etc/hosts
127.0.0.1 localhost
10.0.3.1 MyNode1
10.0.3.2 MyNode2
pcs cluster auth MyNode1 MyNode2
pcs cluster setup --name mycluster MyNode1 MyNode2
pcs cluster start --all
systemctl start pacemaker.service
systemctl enable corosync
systemctl enable pacemaker
pcs status
pcs property set no-quorum-policy=ignore
pcs property set stonith-enabled=false
pcs config
Pacemaker Installation: Ubuntu 14.04
Environment:
Two Ubuntu 14.04 VMs running in VirtualBox - both configured as Apache webservers and Pacemaker cluster nodes - all within a single network (10.0.3.0/24).
sudo apt-get install pacemaker
sudo corosync-keygen
sudo chmod 400 /etc/corosync/authkey
sudo nano /etc/corosync/service.d/pcmk
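This file tells corosync to start Pacemaker; the stanza is typically:
service {
    name: pacemaker
    ver: 1
}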
sudo nano /etc/default/corosync
START=yes
sudo service corosync start
sudo corosync-cmapctl | grep members
sudo update-rc.d pacemaker defaults 20 01
sudo service pacemaker start
sudo crm_mon
sudo crm
Pacemaker Command Line Administration
Environment:
Two CentOS 7 VMs running in VirtualBox - both configured as Apache webservers and Pacemaker cluster nodes - all within a single network (10.0.3.0/24).
pcs status nodes
pcs config
pcs constraint
[manage resource agents]
pcs resource create track-apache ocf:heartbeat:apache configfile=/etc/httpd/conf/httpd.conf statusurl="http://127.0.0.1/server-status" op monitor interval=30s
pcs resource show
pcs resource create virtual_ip ocf:heartbeat:IPaddr2 ip=10.0.3.135 cidr_netmask=32 op monitor interval=30s
pcs resource show
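To keep Apache on whichever node holds the VIP, a colocation constraint can be added (using the resource names created above):
pcs constraint colocation add track-apache with virtual_ip INFINITY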
pcs resource restart virtual_ip
pcs resource delete virtual_ip
DRBD
Environment:
Two Ubuntu 16.04 VMs running in VirtualBox within a single network (10.0.3.0/24). If you need to wipe the partition that will back DRBD (here /dev/sda6), use dd:
sudo dd if=/dev/zero of=/dev/sda6 bs=1M count=128
sudo nano /etc/drbd.d/global_common.conf
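global_common.conf holds only defaults; the r0 resource used below needs its own definition. A minimal sketch in /etc/drbd.d/r0.res, assuming hostnames drbd1/drbd2 (they must match uname -n) and placeholder addresses on the lab network:
resource r0 {
    protocol C;
    on drbd1 {
        device /dev/drbd0;
        disk /dev/sda6;
        address 10.0.3.10:7788;
        meta-disk internal;
    }
    on drbd2 {
        device /dev/drbd0;
        disk /dev/sda6;
        address 10.0.3.11:7788;
        meta-disk internal;
    }
}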
sudo drbdadm create-md r0
sudo systemctl start drbd.service
sudo drbdadm -- --overwrite-data-of-peer primary all [on primary]
sudo mkfs.ext3 /dev/drbd0
sudo mount /dev/drbd0 /srv
sudo cp -r /etc/hosts /srv
sudo umount /srv
sudo drbdadm secondary r0
sudo drbdadm primary r0 [on the other node]
sudo mount /dev/drbd0 /srv
sudo apt install clvm
sudo systemctl start clvm
sudo systemctl disable drbd
sudo umount /srv
sudo drbdadm down r0
sudo apt-get install -y pacemaker
sudo nano /etc/corosync/corosync.conf
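A minimal two-node corosync.conf sketch (the cluster name and node addresses are placeholders):
totem {
    version: 2
    cluster_name: drbdcluster
    transport: udpu
}
nodelist {
    node {
        ring0_addr: 10.0.3.10
        nodeid: 1
    }
    node {
        ring0_addr: 10.0.3.11
        nodeid: 2
    }
}
quorum {
    provider: corosync_votequorum
    two_node: 1
}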
sudo systemctl restart corosync
sudo systemctl start pacemaker
OCFS2
Environment:
Two Ubuntu 16.04 VMs running in VirtualBox within a single network (10.0.3.0/24).
sudo apt-get install ocfs2console
exit
ssh -X ubuntu@10.0.3.150 [reconnect with X forwarding for the GUI console]
less /etc/default/o2cb
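For the o2cb stack to come up at boot, /etc/default/o2cb needs at least these two settings, matching the cluster name used below:
O2CB_ENABLED=true
O2CB_BOOTCLUSTER=mycluster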
sudo nano /etc/ocfs2/cluster.conf
cluster:
	node_count = 2
	name = mycluster

node:
	ip_port = 7777
	ip_address = 10.0.3.147
	number = 7
	name = ubuntu16a
	cluster = mycluster

node:
	ip_port = 7777
	ip_address = 10.0.3.166
	number = 2
	name = ubuntu16b
	cluster = mycluster
sudo systemctl start ocfs2
sudo mkfs.ocfs2 -L "ubuntu16a-volume" /dev/sda6
sudo mkdir /storage
sudo mount /dev/sda6 /storage
sudo mounted.ocfs2 -d
sudo mounted.ocfs2 -f
GFS2
Environment:
Two CentOS 7 VMs running in VirtualBox - all within a single network (10.0.3.0/24).
df -h
umount -l /dev/mapper/centos-srv
pvcreate /dev/mapper/centos-srv
vgcreate vg-gfs /dev/mapper/centos-srv
lvcreate -L 3G -n myvolume vg-gfs
mkfs.gfs2 -p lock_dlm -t MyCluster:myvolume -j 2 /dev/vg-gfs/myvolume
mkdir /volumes
mount /dev/mapper/vg--gfs-myvolume /volumes
nano /etc/fstab
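A matching fstab entry for the volume created above might look like:
/dev/vg-gfs/myvolume  /volumes  gfs2  defaults,noatime  0  0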
fsck.gfs2 -y /dev/centos/srv
gfs2_grow /home/MyMountPoint
gfs2_tool journals /home/MyMountPoint
gfs2_jadd -j2 /home/MyMountPoint
gfs2_edit /dev/mapper/vg--gfs-myvolume
GFS2-Pacemaker Integration
Environment:
Two CentOS 7 VMs running in VirtualBox with Pacemaker installed and running - all within a single network (10.0.3.0/24).
yum install -y gfs2-utils dlm
pcs cluster cib MyDLM_cfg
pcs -f MyDLM_cfg resource create MyDLM ocf:pacemaker:controld op monitor interval=60s
pcs -f MyDLM_cfg resource clone MyDLM clone-max=2 clone-node-max=1
pcs -f MyDLM_cfg resource show
pcs cluster cib-push MyDLM_cfg
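After the push, the DLM clone should start on both nodes; verify with:
pcs status resources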