CENTOS 6.5 GFS CLUSTER

This guide sets up a two-node RHCS (Red Hat Cluster Suite) cluster with a shared GFS2 file system on CentOS 6.5 x64, using the nodes:

cluster1.rmohan.com
cluster2.rmohan.com

# cat /etc/hosts

192.168.0.10 cluster1.rmohan.com cluster1
192.168.0.11 cluster2.rmohan.com cluster2

[root@cluster1 ~]# iptables -F
[root@cluster1 ~]# iptables-save > /etc/sysconfig/iptables
[root@cluster1 ~]# /etc/init.d/iptables restart
iptables: Setting chains to policy ACCEPT: filter [ OK ]
iptables: Flushing firewall rules: [ OK ]
iptables: Unloading modules: [ OK ]
iptables: Applying firewall rules: [ OK ]
[root@cluster1 ~]# vi /etc/selinux/config
[root@cluster1 ~]#

[root@cluster1 ~]# cat /etc/selinux/config

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of these two values:
# targeted - Targeted processes are protected,
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
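
To apply the permissive mode right away without rebooting, the running system can also be switched with setenforce (a quick optional step; the config file above still governs the mode after the next reboot):

setenforce 0     # put SELinux into permissive mode for the running system
getenforce       # should now report: Permissive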

yum install iscsi-initiator-utils
chkconfig iscsid on
service iscsid start

yum install ntp

chkconfig ntpd on

service ntpd start

[root@cluster ~]# chkconfig --level 345 ntpd on
[root@cluster2 ~]# /etc/init.d/ntpd
ntpd ntpdate
[root@cluster2 ~]# /etc/init.d/ntpd restart
Shutting down ntpd: [FAILED]
Starting ntpd: [ OK ]
[root@cluster2 ~]# clear

ntpdate -u 0.centos.pool.ntp.org
17 Feb 21:32:32 ntpdate[12196]: adjust time server 103.11.143.248 offset 0.000507 sec

[root@cluster1 ~]# date
Tue Feb 17 21:32:49 SGT 2015
[root@cluster1 ~]#
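
To confirm that ntpd has actually synchronised against an upstream server (an optional check, not part of the original steps), the peer list can be inspected:

ntpq -p          # the peer marked with '*' is the currently selected time source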

# iscsiadm -m discovery -t sendtargets -p 192.168.1.50

The iSCSI target (an Openfiler box at 192.168.1.50) exports two LUNs: a roughly 100 MB LUN for the quorum disk (qdisk) and a 20 GB LUN for data.

[root@cluster2 ~]# yum install iscsi-initiator-utils
Loaded plugins: fastestmirror
Setting up Install Process
Loading mirror speeds from cached hostfile
* base: centos.ipserverone.com
* extras: singo.ub.ac.id
* updates: centos.ipserverone.com
Resolving Dependencies
--> Running transaction check
---> Package iscsi-initiator-utils.x86_64 0:6.2.0.873-13.el6 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

========================================================================================================================================
Package Arch Version Repository Size
========================================================================================================================================
Installing:
iscsi-initiator-utils x86_64 6.2.0.873-13.el6 base 719 k

Transaction Summary
========================================================================================================================================
Install 1 Package(s)

Total download size: 719 k
Installed size: 2.4 M
Is this ok [y/N]: y
Downloading Packages:
iscsi-initiator-utils-6.2.0.873-13.el6.x86_64.rpm | 719 kB 00:00
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : iscsi-initiator-utils-6.2.0.873-13.el6.x86_64 1/1
Verifying : iscsi-initiator-utils-6.2.0.873-13.el6.x86_64 1/1

Installed:
iscsi-initiator-utils.x86_64 0:6.2.0.873-13.el6

Complete!

[root@cluster2 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.1.50
Starting iscsid: [ OK ]
192.168.1.50:3260,1 iqn.2006-01.com.openfiler:tsn.5ed5c1620415
[root@cluster2 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.5ed5c1620415 -p 192.168.1.50 -l
Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.5ed5c1620415, portal: 192.168.1.50,3260] (multiple)
Login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.5ed5c1620415, portal: 192.168.1.50,3260] successful.
[root@cluster2 ~]#
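
So that the iSCSI session is re-established automatically after a reboot, the node startup mode can be set to automatic and the iscsi init script enabled (a hedged extra step; on many RHEL 6 installs node.startup already defaults to automatic):

iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.5ed5c1620415 -p 192.168.1.50 \
    --op update -n node.startup -v automatic
chkconfig iscsi on     # the iscsi init script logs back in to recorded targets at boot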

[root@cluster2 ~]# cat /proc/partitions
major minor #blocks name

8 0 20971520 sda
8 1 512000 sda1
8 2 20458496 sda2
253 0 18358272 dm-0
253 1 2097152 dm-1
8 16 19988480 sdb
8 32 327680 sdc

[root@cluster2 ~]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00098b76

Device Boot Start End Blocks Id System
/dev/sda1 * 1 64 512000 83 Linux
Partition 1 does not end on cylinder boundary.
/dev/sda2 64 2611 20458496 8e Linux LVM

Disk /dev/mapper/vg_cluster2-lv_root: 18.8 GB, 18798870528 bytes
255 heads, 63 sectors/track, 2285 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

Disk /dev/mapper/vg_cluster2-lv_swap: 2147 MB, 2147483648 bytes
255 heads, 63 sectors/track, 261 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

Disk /dev/sdb: 20.5 GB, 20468203520 bytes
64 heads, 32 sectors/track, 19520 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

Disk /dev/sdc: 335 MB, 335544320 bytes
11 heads, 59 sectors/track, 1009 cylinders
Units = cylinders of 649 * 512 = 332288 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

[root@cluster2 ~]#

Second, install the RHCS packages

1) On cluster1 (the management node), install the RHCS packages together with luci, the web management interface; luci is installed only on the management node.

yum -y install luci cman modcluster ricci gfs2-utils rgmanager lvm2-cluster

2) On cluster2, install the RHCS packages:

yum -y install cman modcluster ricci gfs2-utils rgmanager lvm2-cluster

3) On cluster1 and cluster2, set a password for the ricci user:

passwd ricci

4) Configure the RHCS services to start at boot and start them:

chkconfig ricci on
chkconfig rgmanager on
chkconfig cman on
service ricci start
service rgmanager start
service cman start

[root@cluster1 ~]# chkconfig ricci on
[root@cluster1 ~]# chkconfig rgmanager on
[root@cluster1 ~]# chkconfig cman on
[root@cluster1 ~]# service ricci start
Starting system message bus: [ OK ]
Starting oddjobd: [ OK ]
generating SSL certificates… done
Generating NSS database… done
Starting ricci: [ OK ]
[root@cluster1 ~]# service rgmanager start
Starting Cluster Service Manager: [ OK ]
[root@cluster1 ~]# service cman start
Starting cluster:
Checking if cluster has been disabled at boot… [ OK ]
Checking Network Manager… [ OK ]
Global setup… [ OK ]
Loading kernel modules… [ OK ]
Mounting configfs… [ OK ]
Starting cman… xmlconfig cannot find /etc/cluster/cluster.conf
[FAILED]
Stopping cluster:
Leaving fence domain… [ OK ]
Stopping gfs_controld… [ OK ]
Stopping dlm_controld… [ OK ]
Stopping fenced… [ OK ]
Stopping cman… [ OK ]
Unloading kernel modules… [ OK ]
Unmounting configfs… [ OK ]

[root@cluster2 ~]# service ricci start
Starting system message bus: [ OK ]
Starting oddjobd: [ OK ]
generating SSL certificates… done
Generating NSS database… done
Starting ricci: [ OK ]
[root@cluster2 ~]# service rgmanager start
Starting Cluster Service Manager: [ OK ]
[root@cluster2 ~]# service cman start
Starting cluster:
Checking if cluster has been disabled at boot… [ OK ]
Checking Network Manager… [ OK ]
Global setup… [ OK ]
Loading kernel modules… [ OK ]
Mounting configfs… [ OK ]
Starting cman… xmlconfig cannot find /etc/cluster/cluster.conf
[FAILED]
Stopping cluster:
Leaving fence domain… [ OK ]
Stopping gfs_controld… [ OK ]
Stopping dlm_controld… [ OK ]
Stopping fenced… [ OK ]
Stopping cman… [ OK ]
Unloading kernel modules… [ OK ]
Unmounting configfs… [ OK ]
[root@cluster2 ~]#

Third, start the luci service on the management node cluster1

1) Start luci Service

[root@cluster1 ~]# chkconfig luci on
[root@cluster1 ~]# service luci start
Adding following auto-detected host IDs (IP addresses/domain names), corresponding to `cluster1.rmohan.com’ address, to the configuration of self-managed certificate `/var/lib/luci/etc/cacert.config’ (you can change them by editing `/var/lib/luci/etc/cacert.config’, removing the generated certificate `/var/lib/luci/certs/host.pem’ and restarting luci):
(none suitable found, you can still do it manually as mentioned above)

Generating a 2048 bit RSA private key
writing new private key to ‘/var/lib/luci/certs/host.pem’
Start luci… [ OK ]
Point your web browser to https://cluster1.rmohan.com:8084 (or equivalent) to access luci
[root@cluster1 ~]#

[root@cluster1 ~]#

RHCS cluster configuration

https://192.168.1.10:8084/homebase/

GFS 001

1. Add a cluster

Log into the management interface, click Manage Clusters -> Create, and fill in the following:

Cluster Name: gfs

GFS 002

GFS 003

Cluster Name: gfs

Node Name             Password   Ricci Hostname          Ricci Port
cluster1.rmohan.com   test123    cluster1.rmohan.com     11111
cluster2.rmohan.com   test123    cluster2.rmohan.com     11111

Select the option "Use locally installed packages", then submit.

Note: This step will create a cluster configuration file /etc/cluster/cluster.conf
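
For reference, the generated cluster.conf for this two-node cluster looks roughly like the sketch below (an illustrative skeleton only, before fence devices and the quorum disk are added; the file luci actually writes may differ in detail):

<?xml version="1.0"?>
<cluster config_version="1" name="gfs">
    <clusternodes>
        <clusternode name="cluster1.rmohan.com" nodeid="1"/>
        <clusternode name="cluster2.rmohan.com" nodeid="2"/>
    </clusternodes>
    <cman expected_votes="1" two_node="1"/>
    <fencedevices/>
    <rm/>
</cluster>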

2. Fence Devices

Description:
For RHCS to provide its full clustering functionality, a working fence device is required.
Since no physical fencing hardware is available in this setup, the VMware ESXi 5.x SOAP fencing agent is used as the fence device.
It is only because this fence device is available that the RHCS functionality can be fully tested.

(1) Log into the management interface and click the cluster -> Fence Devices -> Add
(2) Select "VMware Fencing (SOAP Interface)"
(3) Name: "ESXi_fence"
(4) IP Address or Hostname: "192.168.1.31" (the ESXi host address)
(5) Login: "root"
(6) Password: "test123"

GFS 004

GFS 005

3. Bind the fence device to the nodes

Add a fence method to node one:

1) Click the cluster1.rmohan.com node and add a Fence Method named node01_fence;
2) Add a fence instance and select "ESXi_fence" VMware Fencing (SOAP Interface)
3) VM NAME: "kvm_cluster1"
4) VM UUID: "564d6fbf-05fb-1dd1-fb66-7ea3c85dcfdf", and check SSL

Description: VM NAME is the virtual machine name; VM UUID is the value of the "uuid.location" entry in the virtual machine's .vmx file. Both can be listed with fence_vmware_soap:

# /usr/sbin/fence_vmware_soap -a 192.168.1.31 -z -l root -p test123 -n kvm_node2 -o list
kvm_cluster2,564d4c42-e7fd-db62-3878-57f77df2475e
kvm_cluster1,564d6fbf-05fb-1dd1-fb66-7ea3c85dcfdf

Add a fence method to node two (the resulting cluster.conf entries are sketched below):

1) Click the cluster2.rmohan.com node and add a Fence Method named node02_fence;
2) Add a fence instance and select "ESXi_fence" VMware Fencing (SOAP Interface)
3) VM NAME: "kvm_cluster2"
4) VM UUID: "564d4c42-e7fd-db62-3878-57f77df2475e", and check SSL
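
In cluster.conf these fencing settings end up roughly as sketched below (illustrative only; attribute names follow the fence_vmware_soap agent, and the names and UUIDs match the values entered above):

<fencedevices>
    <fencedevice agent="fence_vmware_soap" name="ESXi_fence"
                 ipaddr="192.168.1.31" login="root" passwd="test123" ssl="on"/>
</fencedevices>
<clusternode name="cluster1.rmohan.com" nodeid="1">
    <fence>
        <method name="node01_fence">
            <device name="ESXi_fence" port="kvm_cluster1"
                    uuid="564d6fbf-05fb-1dd1-fb66-7ea3c85dcfdf" ssl="on"/>
        </method>
    </fence>
</clusternode>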

GFS 006

GFS 007

# Manual test fence function example:

# /usr/sbin/fence_vmware_soap -a 192.168.1.31 -z -l root -p test123 -n kvm_node2 -o reboot
Status: ON

Options:
-o : the action to perform (list, status, reboot, off, on, ...)

4. Add a Failover Domain

Name: gfs_failover
Prioritized
Restricted
cluster1.rmohan.com   priority 1
cluster2.rmohan.com   priority 1

5. Configure the GFS service

(1) GFS service configuration

On cluster1.rmohan.com and cluster2.rmohan.com, enable cluster-wide LVM locking (CLVM) and start the clvmd service:

lvmconf --enable-cluster
chkconfig clvmd on

service clvmd start
Activating VG(s): No volume groups found [ OK ]

[root@cluster1 ~]# service clvmd start
Activating VG(s): 2 logical volume(s) in volume group “vg_cluster1” now active
[ OK ]
[root@cluster1 ~]#

[root@cluster2 ~]# service clvmd start
Activating VG(s): 2 logical volume(s) in volume group “vg_cluster2” now active
[ OK ]
[root@cluster2 ~]#
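
lvmconf --enable-cluster switches LVM to cluster-wide locking; this can be verified in /etc/lvm/lvm.conf (a quick sanity check, expecting locking_type = 3):

grep "locking_type" /etc/lvm/lvm.conf     # should show: locking_type = 3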

(2) Create the clustered LVM volume on cluster1.rmohan.com. The shared iSCSI disks were partitioned with fdisk beforehand (not shown), giving /dev/sdb1 for data and /dev/sdc1 for the quorum disk.

pvcreate /dev/sdb1

[root@cluster1 ~]# pvcreate /dev/sdb1
Physical volume “/dev/sdb1” successfully created

[root@cluster1 ~]# vgcreate gfsvg /dev/sdb1
Clustered volume group “gfsvg” successfully created

# pvcreate /dev/sdc1
Physical volume “/dev/sdc1” successfully created

# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg_node01 lvm2 a– 39.51g 0
/dev/sdc1 lvm2 a– 156.25g 156.25g

[root@cluster1 ~]# lvcreate -l +100%FREE -n data gfsvg
Error locking on node cluster2.rmohan.com: Volume group for uuid not found: EjHAOyOMJtk7pJ1gcUeOrbXjgFKMl05y2a3Mdh27oxKVpVBXjLYxHeU6088U9Ptc
Failed to activate new LV.

Note: cluster2 was rebooted at this point, and the lvcreate was retried:

[root@cluster1 ~]# lvcreate -l +100%FREE -n data gfsvg
clvmd not running on node cluster2.rmohan.com
Unable to drop cached metadata for VG gfsvg.
clvmd not running on node cluster2.rmohan.com

Start clvmd on cluster2, after which the lvcreate on cluster1 succeeds:

# /etc/init.d/clvmd start

(3) Format the GFS2 file system

cluster1 node:

[root@cluster1 ~]# mkfs.gfs2 -p lock_dlm -t gfs:gfs2 -j 2 /dev/gfsvg/data
This will destroy any data on /dev/gfsvg/data.
It appears to contain: symbolic link to `../dm-2′

Are you sure you want to proceed? [y/n] y

Device: /dev/gfsvg/data
Blocksize: 4096
Device Size 19.06 GB (4996096 blocks)
Filesystem Size: 19.06 GB (4996093 blocks)
Journals: 2
Resource Groups: 77
Locking Protocol: “lock_dlm”
Lock Table: “gfs:gfs2”
UUID: aaecbd43-cd34-fc15-61c8-29fb0c282279

Description:
In the lock table name "gfs:gfs2", "gfs" is the cluster name and "gfs2" is a file-system name chosen here, similar to a label.
-j specifies the number of journals, i.e. the number of hosts that will mount the file system; if it is omitted, only one journal is created.
There are two nodes in this setup, so -j 2 is used.
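
If a third node is added later, extra journals can be appended to the existing file system with gfs2_jadd (a hedged example; the file system must be mounted when journals are added):

gfs2_jadd -j 1 /vmdata     # add one more journal to the mounted GFS2 file system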

6. Mount the GFS2 file system

Create a mount point on cluster1 and cluster2 and mount the GFS2 volume on both nodes:

[root@cluster1 ~]# mkdir /vmdata
[root@cluster1 ~]# mount.gfs2 /dev/gfsvg/data /vmdata
[root@cluster1 ~]#

[root@cluster2 ~]# mkdir /vmdata
[root@cluster2 ~]# mount.gfs2 /dev/gfsvg/data /vmdata
[root@cluster2 ~]# df -TH
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/vg_cluster2-lv_root
ext4 19G 1.1G 17G 6% /
tmpfs tmpfs 2.0G 33M 2.0G 2% /dev/shm
/dev/sda1 ext4 500M 52M 422M 11% /boot
/dev/gfsvg/data gfs2 21G 272M 21G 2% /vmdata
[root@cluster2 ~]#
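
To have the file system mounted automatically at boot on both nodes, an /etc/fstab entry combined with the gfs2 init script can be used (a sketch; _netdev delays the mount until networking and the iSCSI session are up):

# /etc/fstab
/dev/gfsvg/data    /vmdata    gfs2    defaults,_netdev    0 0

chkconfig gfs2 on     # the gfs2 init script mounts fstab gfs2 entries at boot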

[Screenshots: GFS 008 - GFS 016]

7. Configure the voting (quorum) disk

Description:
The voting disk is a small shared disk; it does not need to be large. In this example the roughly 300 MB /dev/sdc1 is used.

fdisk -l

Device Boot Start End Blocks Id System
/dev/sdc1 1 1009 327391 8e Linux LVM

Disk /dev/mapper/gfsvg-data: 20.5 GB, 20464009216 bytes
255 heads, 63 sectors/track, 2487 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

mkqdisk -c /dev/sdc1 -l myqdisk

[root@cluster1 ~]# mkqdisk -c /dev/sdc1 -l myqdisk
mkqdisk v3.0.12.1

Writing new quorum disk label ‘myqdisk’ to /dev/sdc1.
WARNING: About to destroy all data on /dev/sdc1; proceed [N/y] ? y
Initializing status block for node 1…
Initializing status block for node 2…
Initializing status block for node 3…
Initializing status block for node 4…
Initializing status block for node 5…
Initializing status block for node 6…
Initializing status block for node 7…
Initializing status block for node 8…
Initializing status block for node 9…
Initializing status block for node 10…
Initializing status block for node 11…
Initializing status block for node 12…
Initializing status block for node 13…
Initializing status block for node 14…
Initializing status block for node 15…
Initializing status block for node 16…

[root@cluster1 ~]# mkqdisk -L
mkqdisk v3.0.12.1

/dev/block/8:33:
/dev/disk/by-id/scsi-14f504e46494c45525a344e426c512d623153702d326e417a-part1:
/dev/disk/by-path/ip-192.168.1.50:3260-iscsi-iqn.2006-01.com.openfiler:tsn.5ed5c1620415-lun-1-part1:
/dev/sdc1:
Magic: eb7a62c2
Label: myqdisk
Created: Tue Feb 17 22:56:30 2015
Host: cluster1.rmohan.com
Kernel Sector Size: 512
Recorded Sector Size: 512

(3) Configure the quorum disk (QDisk)

# In the management interface, go to Manage Clusters -> gfs -> Configure -> QDisk and set:

Device : /dev/sdc1

Path to program : ping -c3 -t2 192.168.0.253
Interval : 3
Score : 2
TKO : 10
Minimum Score : 1

# Click apply
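
After applying, the QDisk settings are written to cluster.conf roughly as in the sketch below (illustrative only; with a quorum disk the cman expected_votes value is normally raised so the qdisk contributes a vote):

<quorumd device="/dev/sdc1" interval="3" tko="10" min_score="1" votes="1">
    <heuristic program="ping -c3 -t2 192.168.0.253" interval="3" score="2"/>
</quorumd>
<cman expected_votes="3"/>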

(4) Start the qdiskd service

chkconfig qdiskd on
service qdiskd start
clustat -l

[root@cluster1 ~]# clustat -l
Cluster Status for gfs @ Tue Feb 17 23:10:24 2015
Member Status: Quorate

 Member Name                        ID   Status
 ------ ----                        ---- ------
 cluster1.rmohan.com                    1 Online, Local
 cluster2.rmohan.com                    2 Online
 /dev/sdc1                              0 Online, Quorum Disk

[root@cluster2 ~]# clustat -l
Cluster Status for gfs @ Tue Feb 17 23:10:29 2015
Member Status: Quorate

 Member Name                        ID   Status
 ------ ----                        ---- ------
 cluster1.rmohan.com                    1 Online
 cluster2.rmohan.com                    2 Online, Local
 /dev/sdc1                              0 Online, Quorum Disk
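
Quorum and vote counts can be cross-checked with cman_tool (an optional verification; with two nodes plus the quorum disk the total number of votes should be three):

cman_tool status | grep -i -E "votes|quorum"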
