为了测试方便:全部机子关闭防火墙或清空防火墙规则

corosync  v2  +  pacemaker

RHEL 7.0:node1:  192.168.10.201  主机名:node1

RHEL 7.3:node2:  192.168.10.202  主机名:node2

RHEL 7.3:node3:  192.168.10.203  主机名:node3

RHEL 7.3:node4:  192.168.10.204  主机名:node4

node1、node2、node3、node4的/etc/hosts文件中添加:

192.168.10.201  node1

192.168.10.202  node2

192.168.10.203  node3

192.168.10.204  node4

·node4设置免密ssh登录node1、node2、node3、node4

·设置node1node2node3node4时间同步,可以使用ntpdate或者使用硬件时钟同步(4台机时间相同,设置过程略)

1、node2、node3、node4安装corosync、pacemaker

yum install corosync pacemaker -y

2、配置corosync

[root@node4 ~]# cd /etc/corosync/

[root@node4 corosync]# cp corosync.conf.example corosync.conf

[root@node4 corosync]# vim corosync.conf

 totem {

       version: 2

       crypto_cipher: aes128

       crypto_hash: sha1

        secauth: on

       interface {

              ringnumber: 0

              bindnetaddr: 192.168.10.0

              mcastaddr: 239.185.1.31

              mcastport: 5405

              ttl: 1

       }

}

nodelist { #节点设置

     node {

            ring0_addr: 192.168.10.204

            nodeid: 1

     }

     node {

            ring0_addr: 192.168.10.203

            nodeid: 2

     }

     node {

            ring0_addr: 192.168.10.202

            nodeid: 3

     }

}

logging {

       fileline: off

       to_stderr: no

       to_logfile: yes

       logfile: /var/log/cluster/corosync.log

       to_syslog: no

       debug: off

       timestamp: on

       logger_subsys {

              subsys: QUORUM

              debug: off

       }

}

quorum {  # 指定仲裁机制是corosync投票系统

       provider: corosync_votequorum

}

3、在node4/etc/corosync目录中生成密钥

[root@node4 corosync]# corosync-keygen

将密钥和配置文件发给node2、node3

[root@node4 corosync]# scp -p authkey corosync.conf root@192.168.10.202:/etc/corosync/

[root@node4 corosync]# scp -p authkey corosync.conf root@192.168.10.203:/etc/corosync/

4、node2、node3、node4配置pacemaker

# vim /etc/sysconfig/pacemaker

PCMK_logfile=/var/log/pacemaker.log

5、安装crmsh,安装在node2

下载以下安装包:

crmsh-3.0.0-6.2.noarch.rpm          python-parallax-1.0.0a1-7.1.noarch.rpm

crmsh-scripts-3.0.0-6.2.noarch.rpm

python-pssh-2.3.1-7.2.noarch.rpm

pssh-2.3.1-7.2.noarch.rpm

下载地址:

http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/noarch/

http://rpm.pbone.net/index.php3/stat/4/idpl/30426901/dir/centos_7/com/python-parallax-1.0.0a1-7.1.noarch.rpm.html

blob.png

HA  Web  Service构建:


vip:  192.168.10.92  ocf:heartbeat:IPaddr

httpd:  systemd

nfs  shared  storage: ocf:heartbeat:Filesystem

  node2、node3、node4:提供httpd服务   node1:提供nfs共享服务

  node2、node3、node4启动corosync、pacemaker服务

一、服务安装设置

1node1设置:共享目录为:/nfs/htdocs

1、安装nfs

[root@node1 ~]# yum install nfs-utils -y

2、创建共享目录

[root@node1 ~]# mkdir /nfs/htdocs -pv

mkdir: created directory ‘/nfs’

mkdir: created directory ‘/nfs/htdocs’

3、创建测试文件

[root@node1 ~]# echo '<h1><font color="red">Test Page on NFS Server...</font></h1>' > /nfs/htdocs/index.html

4、配置export文件

[root@node1 ~]# vim /etc/exports

/nfs/htdocs  *(rw)

5、启动nfs服务

[root@node1 ~]# systemctl start rpcbind

[root@node1 ~]# systemctl start nfs-server

[root@node1 ~]# systemctl enable rpcbind

[root@node1~]# systemctl enable nfs-server

2、node2、node3、node4安装httpd服务

1、node2、node3、node4安装httpd服务:

# yum install httpd -y

2、node2、node3、node4设置SELinux

# setsebool -P httpd_use_nfs 1

3、node2、node3、node4将httpd服务添加到开机启动

# systemctl enable httpd

二、集群配置

前面crmsh安装在node2

[root@node2 ~]# crm configure

crm(live)configure# property stonith-enabled=false

crm(live)configure# verify

crm(live)configure# commit

crm(live)configure#

blob.png

1、配置vip和webserver

crm(live)configure# primitive webip ocf:heartbeat:IPaddr2 params ip="192.168.10.92" op monitor interval=30s timeout=20s

crm(live)configure# primitive webserver systemd:httpd op monitor interval=20s timeout=20s

crm(live)configure# verify

blob.png

2nfs配置

crm(live)configure# primitive webstore ocf:heartbeat:Filesystem params device="192.168.10.201:/nfs/htdocs" directory="/var/www/html" fstype="nfs" op start timeout=60s op stop timeout=60s op monitor interval=20s timeout=40s

blob.png

3、设置约束,webserver、webstore、webip必须同时在同一个机子上

crm(live)configure# colocation webserver_with_webstore_and_webip inf: webserver ( webip webstore)

blob.png

设置启动顺序,先启动ip再启动存储再启动web服务

crm(live)configure# order webstore_after_webip Mandatory: webip webstore

crm(live)configure# order webserver_after_webstore Mandatory: webstore webserver

blob.png

前面的设置好之后,最后才提交设置:

crm(live)configure# commit

blob.png

查看一下配置情况:

blob.png

node2、node3、node4都要设置SELinux:

#setsebool -P httpd_use_nfs 1

查看状态:

crm(live)configure# cd

crm(live)# status

blob.png

OK。。。配置正确

浏览器打开:192.168.10.92

blob.png

OK。。。。


节点下线:crm node standby

节点上线:crm node online

location约束:位置约束,资源对节点的倾向性

比如web服务倾向于node3

crm(live)configure# location webservice_pref_node3 webip 100: node3

crm(live)configure# verify

crm(live)configure# commit

blob.png

原来的资源在node2上,现在在node3上:

blob.png


                     集群构建2                

 

1、node2、node3、node4停止corosync、pacemaker服务,安装pcs并启动pcsd服务。yum install pcs -y

2、node1配置好ansible的hosts文件:

[ha]

192.168.10.204

192.168.10.202

192.168.10.203

3、设置认证用户密码:

[root@node1 ~]# ansible ha -m shell -a 'echo 123456 | passwd --stdin hacluster'

4、node4上做认证测试:

[root@node4 ~]# pcs cluster auth node2 node3 node4 -u hacluster

blob.png

认证通过

5、构建集群

[root@node4 ~]# pcs cluster setup --name mycluster node2 node3 node4 --force

Destroying cluster on nodes: node2, node3, node4…

node2: Stopping Cluster (pacemaker)…

node3: Stopping Cluster (pacemaker)…

node4: Stopping Cluster (pacemaker)…

node3: Successfully destroyed cluster

node2: Successfully destroyed cluster

node4: Successfully destroyed cluster

Sending 'pacemaker_remote authkey' to 'node2', 'node3', 'node4'

node4: successful distribution of the file 'pacemaker_remote authkey'

node2: successful distribution of the file 'pacemaker_remote authkey'

node3: successful distribution of the file 'pacemaker_remote authkey'

Sending cluster config files to the nodes…

node2: Succeeded

node3: Succeeded

node4: Succeeded

Synchronizing pcsd certificates on nodes node2, node3, node4…

node3: Success

node2: Success

node4: Success

Restarting pcsd on the nodes in order to reload the certificates…

node4: Success

node3: Success

node2: Success

blob.png

6、启动集群

[root@node4 ~]# pcs cluster start --all

blob.png

[root@node4 ~]# corosync-cfgtool -s

Printing ring status.

Local node ID 3

RING ID 0

       id    = 192.168.10.204

       status   = ring 0 active with no faults

[root@node4 ~]# corosync-cmapctl | grep members

blob.png

7、定义资源webip、webstore、webserver

[root@node4 ~]# pcs resource create webip ocf:heartbeat:IPaddr ip="192.168.10.93" op monitor interval=20s timeout=10s

[root@node4 ~]# pcs resource create webstore ocf:heartbeat:Filesystem device="192.168.10.201:/nfs/htdocs" directory="/var/www/html" fstype=nfs op start timeout=60s op stop timeout=60s op monitor interval=20s timeout=40s

[root@node4 ~]# pcs resource create webserver systemd:httpd op monitor interval=30s timeout=20s

8、资源分组排序:webip -> webstore -> webserver

[root@node4 ~]# pcs resource group add webservice webip webstore webserver

blob.png

查看集群状态:

blob.png

浏览器打开:192.168.10.93

blob.png 

OK。。。构建成功!


节点下线:[root@node4 ~]# pcs cluster standby node2

节点上线:[root@node4 ~]# pcs cluster unstandby node2

资源约束:

[root@node4 ~]# pcs constraint location add webservice_pref_node4 webservice node4 100

blob.png

 

分类: Linux服务架构

3 条评论

小伟 · 2017年12月18日 下午1:49

可以写个搭建Linux WordPress的教程么,想学 🙂

    caomuzhong · 2018年1月7日 下午7:07

    搭建lamp+WordPress即可,很简单的

评论已关闭。