
CentOS 7 cluster (corosync, pacemaker and pcs)

Make sure the three nodes can resolve each other by name; /etc/hosts on each node:

[root@clusterserver1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.20 clusterserver1.rmohan.com clusterserver1
192.168.1.21 clusterserver2.rmohan.com clusterserver2
192.168.1.22 clusterserver3.rmohan.com clusterserver3
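
The same /etc/hosts entries are needed on clusterserver2 and clusterserver3. A minimal sketch to push the file out (scp will still ask for the root password here, since the SSH keys are only set up in a later step):

for i in clusterserver2 clusterserver3 ; do scp /etc/hosts root@$i:/etc/hosts ; done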

Put SELinux into permissive mode on every node (the perl one-liner edits /etc/selinux/config and keeps a .orig backup):

perl -pi.orig -e 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/selinux/config

setenforce 0
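
setenforce 0 only changes the running mode; a quick check (sketch) that both the runtime mode and the saved config now show permissive:

getenforce
grep ^SELINUX= /etc/selinux/config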

Check that the clock and time zone are correct, then install NTP so all the nodes keep the same time:

timedatectl status

yum install -y ntp
systemctl enable ntpd ; systemctl start ntpd
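
Once ntpd has been running for a minute or two it should list its upstream servers (a sketch; which peers appear depends on your ntp.conf):

ntpq -p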

Generate an SSH key pair for root on clusterserver1 (accept the default path and leave the passphrase empty):

[root@clusterserver1 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
e4:57:e7:7c:2e:dd:82:9f:d5:c7:57:f9:ef:ce:d5:e0 root@clusterserver1.rmohan.com
The key's randomart image is:
+--[ RSA 2048]----+
| |
| |
| . . . |
| o . + .|
| S . +.o|
| . o **|
| . E &|
| . *=|
| oo=|
+-----------------+
[root@clusterserver1 ~]#

Copy the public key to every node, including clusterserver1 itself:

for i in clusterserver1 clusterserver2 clusterserver3 ; do ssh-copy-id $i; done

/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@clusterserver1's password:
Permission denied, please try again.
root@clusterserver1's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'clusterserver1'"
and check to make sure that only the key(s) you wanted were added.

The authenticity of host 'clusterserver2 (192.168.1.21)' can't be established.
ECDSA key fingerprint is 43:25:9c:32:53:18:33:a9:25:f7:cd:bb:b0:64:80:fd.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@clusterserver2's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'clusterserver2'"
and check to make sure that only the key(s) you wanted were added.

The authenticity of host 'clusterserver3 (192.168.1.22)' can't be established.
ECDSA key fingerprint is 62:79:b1:c7:9b:de:a3:5e:a4:3d:e0:15:2b:f8:c2:f7.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@clusterserver3's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'clusterserver3'"
and check to make sure that only the key(s) you wanted were added.
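
A quick sanity check (sketch) that passwordless root SSH now works from clusterserver1 to all three nodes:

for i in clusterserver1 clusterserver2 clusterserver3 ; do ssh $i hostname ; done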

Install the iSCSI initiator on every node and log in to the shared-storage target (an Openfiler box at 192.168.1.90 in this setup):

yum install iscsi-initiator-utils -y

systemctl enable iscsi
systemctl start iscsi

iscsiadm -m discovery -t sendtargets -p 192.168.1.90:3260

iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.b01850dab96a --portal 192.168.1.90 --login

The same login command with short options:

iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.b01850dab96a -p 192.168.1.90:3260 -l
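
To confirm the login worked and to see the new block device (a sketch; the actual device name depends on what the target exports):

iscsiadm -m session
lsblk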

Install corosync and pacemaker on the nodes

yum -y install lvm2-cluster corosync pacemaker pcs fence-agents-all

systemctl enable pcsd.service

systemctl start pcsd.service

Set the hacluster password; it must be the same on every node:

echo test123 | passwd --stdin hacluster
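
If the password was only set on clusterserver1, a minimal sketch to push the same hacluster password to the other two nodes over the SSH keys created earlier:

for i in clusterserver2 clusterserver3 ; do ssh $i 'echo test123 | passwd --stdin hacluster' ; done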

pcs cluster auth clusterserver1 clusterserver2 clusterserver3

[root@clusterserver1 ~]# pcs cluster auth clusterserver1 clusterserver2 clusterserver3
Username: hacluster
Password:
clusterserver3: Authorized
clusterserver2: Authorized
clusterserver1: Authorized
[root@clusterserver1 ~]#

[root@clusterserver1 ~]# ls -lt /var/lib/pcsd/
total 20
-rw------- 1 root root 250 Jan 4 03:33 tokens
-rw-r--r-- 1 root root 1542 Jan 4 03:33 pcs_users.conf
-rwx------ 1 root root 60 Jan 4 03:28 pcsd.cookiesecret
-rwx------ 1 root root 1233 Jan 4 03:28 pcsd.crt
-rwx------ 1 root root 1679 Jan 4 03:28 pcsd.key
[root@clusterserver1 ~]#

pcs cluster setup --name webcluster clusterserver1 clusterserver2 clusterserver3

[root@clusterserver1 ~]# pcs cluster setup --name webcluster clusterserver1 clusterserver2 clusterserver3
Shutting down pacemaker/corosync services…
Redirecting to /bin/systemctl stop pacemaker.service
Redirecting to /bin/systemctl stop corosync.service
Killing any remaining services…
Removing all cluster configuration files…
clusterserver1: Succeeded
clusterserver2: Succeeded
clusterserver3: Succeeded
Synchronizing pcsd certificates on nodes clusterserver1, clusterserver2, clusterserver3…
clusterserver3: Success
clusterserver2: Success
clusterserver1: Success

Restarting pcsd on the nodes in order to reload the certificates…
clusterserver3: Success
clusterserver2: Success
clusterserver1: Success
[root@clusterserver1 ~]#

[root@clusterserver1 ~]# ls /etc/corosync/
corosync.conf corosync.conf.example corosync.conf.example.udpu corosync.xml.example uidgid.d/
[root@clusterserver1 ~]# ls /etc/corosync/*
/etc/corosync/corosync.conf /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf.example.udpu /etc/corosync/

/etc/corosync/uidgid.d:
[root@clusterserver1 ~]#
[root@clusterserver1 corosync]# cat corosync.conf
totem {
    version: 2
    secauth: off
    cluster_name: webcluster
    transport: udpu
}

nodelist {
    node {
        ring0_addr: clusterserver1
        nodeid: 1
    }

    node {
        ring0_addr: clusterserver2
        nodeid: 2
    }

    node {
        ring0_addr: clusterserver3
        nodeid: 3
    }
}

quorum {
    provider: corosync_votequorum
}

logging {
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
}
[root@clusterserver1 corosync]#
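
pcs cluster setup writes this file to every node; a quick sketch to confirm the copies are identical:

for i in clusterserver1 clusterserver2 clusterserver3 ; do ssh $i md5sum /etc/corosync/corosync.conf ; done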

Until the cluster has been started, pcs status on the other nodes only reports an error:

[root@clusterserver2 ~]# pcs status
Error: cluster is not currently running on this node
[root@clusterserver2 ~]#

[root@clusterserver3 ~]# pcs status
Error: cluster is not currently running on this node
[root@clusterserver3 ~]#

pcs cluster enable --all

[root@clusterserver1 corosync]# pcs cluster enable --all
clusterserver1: Cluster Enabled
clusterserver2: Cluster Enabled
clusterserver3: Cluster Enabled
[root@clusterserver1 corosync]#

Start the cluster
• From any node: pcs cluster start --all

[root@clusterserver1 corosync]# pcs status
Cluster name: webcluster
WARNING: no stonith devices and stonith-enabled is not false
Last updated: Mon Jan 4 03:39:26 2016 Last change: Mon Jan 4 03:39:24 2016 by hacluster via crmd on clusterserver1
Stack: corosync
Current DC: clusterserver1 (version 1.1.13-10.el7-44eb2dd) - partition with quorum
3 nodes and 0 resources configured

Online: [ clusterserver1 clusterserver2 clusterserver3 ]

Full list of resources:

PCSD Status:
clusterserver1: Online
clusterserver2: Online
clusterserver3: Online

Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled
[root@clusterserver1 corosync]#

Verify Corosync Installation
• corosync-cfgtool -s

[root@clusterserver1 corosync]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
id = 192.168.1.20
status = ring 0 active with no faults
[root@clusterserver1 corosync]#

[root@clusterserver2 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 2
RING ID 0
id = 192.168.1.21
status = ring 0 active with no faults
[root@clusterserver2 ~]#

Verify Corosync Installation
• corosync-cmapctl | grep members

[root@clusterserver2 ~]# corosync-cmapctl | grep members
runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.1.20)
runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.1.status (str) = joined
runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.1.21)
runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.2.status (str) = joined
runtime.totem.pg.mrp.srp.members.3.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.3.ip (str) = r(0) ip(192.168.1.22)
runtime.totem.pg.mrp.srp.members.3.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.3.status (str) = joined
[root@clusterserver2 ~]#

Verify the Pacemaker configuration
• crm_verify -L -V

[root@clusterserver2 ~]# crm_verify -L -V
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid
[root@clusterserver2 ~]#
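
The errors come from the missing fence (STONITH) devices. For a lab cluster you can disable STONITH until fencing is configured; this is only a sketch, and any production cluster with shared storage should configure real fence agents instead:

pcs property set stonith-enabled=false
crm_verify -L -V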
