Installing a High-Availability Cluster on CentOS (from the web, with my own modifications)
- Category: Cluster
- Published: 2006-05-28, Sunday 14:28
- Author: Super User
Virtual IP: 192.168.1.50
Load Balancer: 192.168.1.51
Backup: 192.168.1.52
Real Server 1: 192.168.1.251
Real Server 2: 192.168.1.252
install patched kernel (IPVS and ARP hidden) (all servers)
1)rpm -Fvh mkinitrd-3.5.13-1.um.1.i386.rpm
2)rpm -Uvh mkinitrd-debuginfo-3.5.13-1.um.1.i386.rpm
3)rpm -Fvh kernel-2.4.21-27.0.2.EL.um.1.i686.rpm
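After installing, reboot into the patched kernel. As a quick sanity check (my own addition, not part of the original recipe), the hidden flag only appears once the ARP-hidden patch is active:
uname -r                                  # should now show the patched (um) kernel
ls /proc/sys/net/ipv4/conf/all/hidden     # this flag exists only with the ARP-hidden patch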
install ipvsadm (Load Balancer and Backup)
1)rpm -Uvh ipvsadm-1.21-1.rh.el.1.i386.rpm
2)rpm -Uvh ipvsadm-debuginfo-1.21-1.rh.el.1.i386.rpm
3)vi /etc/sysconfig/ipvsadm (the flags are explained in the note after this section)
-A -t 192.168.1.50:80 -s rr
-a -t 192.168.1.50:80 -r 192.168.1.51:80 -i
-a -t 192.168.1.50:80 -r 192.168.1.52:80 -i
-a -t 192.168.1.50:80 -r 192.168.1.251:80 -i
-a -t 192.168.1.50:80 -r 192.168.1.252:80 -i
4)vi /etc/hosts (these names must match uname -n on each node; see the note below)
192.168.1.51 loadbalancer.com
192.168.1.52 backup.com
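If uname -n does not already report these names, set them before heartbeat starts (a minimal sketch for the primary node; on this CentOS generation the name is persisted via /etc/sysconfig/network):
hostname loadbalancer.com
# and set HOSTNAME=loadbalancer.com in /etc/sysconfig/network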
5)rpm -Uvh glib2-devel-2.2.3-2.0.i386.rpm
6)rpm -Uvh libnet-1.1.0-1.rh.el.1.i386.rpm
7)rpm -Uvh libnet-debuginfo-1.1.0-1.rh.el.1.i386.rpm
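A note on the rules in step 3: -A -t adds the TCP virtual service, -s rr selects the round-robin scheduler, -a ... -r adds a real server to that service, and -i forwards by IP tunneling (ipip), which matches the tunl0 setup on the real servers below. Once the rules have been loaded, the table can be inspected on the active director (a quick check, not part of the original steps):
ipvsadm -L -n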
install HeartBeat (Load Balancer and Backup)
1)rpm -Uvh heartbeat-pils-1.0.4-2.rh.el.um.1.i386.rpm
2)rpm -Uvh heartbeat-stonith-1.0.4-2.rh.el.um.1.i386.rpm
3)rpm -Uvh heartbeat-1.0.4-2.rh.el.um.1.i386.rpm
4)rpm -Uvh heartbeat-debuginfo-1.0.4-2.rh.el.um.1.i386.rpm
5)rpm -Uvh perl-Parse-RecDescent-1.80-1.rh.el.um.1.noarch.rpm
6)rpm -Uvh perl-Mail-IMAPClient-2.2.7-1.rh.el.um.1.noarch.rpm
7)rpm -Uvh perl-Net-SSLeay-1.23-1.rh.el.um.1.i386.rpm
8)rpm -Uvh perl-Net-SSLeay-debuginfo-1.23-1.rh.el.um.1.i386.rpm
9)rpm -Uvh perl-Authen-SASL-2.03-1.rh.el.um.1.noarch.rpm
10)rpm -Uvh perl-Convert-ASN1-0.16-2.rh.el.um.1.noarch.rpm
11)rpm -Uvh perl-IO-Socket-SSL-0.92-1.rh.el.um.1.noarch.rpm
12)rpm -Uvh perl-XML-NamespaceSupport-1.08-1.rh.el.um.1.noarch.rpm
13)rpm -Uvh perl-XML-SAX-0.12-1.rh.el.um.1.noarch.rpm
14)rpm -Uvh perl-ldap-0.2701-1.rh.el.um.1.noarch.rpm
15)rpm -Uvh heartbeat-ldirectord-1.0.4-2.rh.el.um.1.i386.rpm
16)chkconfig --add heartbeat
17)chkconfig heartbeat on
18)chkconfig --del ldirectord (heartbeat will start ldirectord as a cluster resource, so it must not also start on its own at boot)
19)cp -rp /usr/share/doc/heartbeat-1.0.4/ha.cf /etc/ha.d/
20)cp -rp /usr/share/doc/heartbeat-1.0.4/haresources /etc/ha.d/
21)cp -rp /usr/share/doc/heartbeat-1.0.4/authkeys /etc/ha.d/
22)cp -rp /usr/share/doc/heartbeat-ldirectord-1.0.4/ldirectord.cf /etc/ha.d/
23)vi /etc/ha.d/ha.cf
#
# There are lots of options in this file. All you have to have is a set
# of nodes listed {"node ...}
# and one of {serial, bcast, mcast, or ucast}
#
# ATTENTION: As the configuration file is read line by line,
# THE ORDER OF DIRECTIVES MATTERS!
#
# In particular, make sure that the timings and udpport
# et al are set before the heartbeat media are defined!
# All will be fine if you keep them ordered as in this
# example.
#
#
# Note on logging:
# If any of debugfile, logfile and logfacility are defined then they
# will be used. If debugfile and/or logfile are not defined and
# logfacility is defined then the respective logging and debug
# messages will be logged to syslog. If logfacility is not defined
# then debugfile and logfile will be used to log messages. If
# logfacility is not defined and debugfile and/or logfile are not
# defined then defaults will be used for debugfile and logfile as
# required and messages will be sent there.
#
# File to write debug messages to
#debugfile /var/log/ha-debug
#
#
# File to write other messages to
#
logfile /var/log/ha-log
#
#
# Facility to use for syslog()/logger
#
logfacility local0
#
#
# A note on specifying "how long" times below...
#
# The default time unit is seconds
# 10 means ten seconds
#
# You can also specify them in milliseconds
# 1500ms means 1.5 seconds
#
#
# keepalive: how long between heartbeats?
#
keepalive 2
#
# deadtime: how long-to-declare-host-dead?
#
deadtime 30
#
# warntime: how long before issuing "late heartbeat" warning?
# See the FAQ for how to use warntime to tune deadtime.
#
warntime 10
#
#
# Very first dead time (initdead)
#
# On some machines/OSes, etc. the network takes a while to come up
# and start working right after you've been rebooted. As a result
# we have a separate dead time for when things first come up.
# It should be at least twice the normal dead time.
#
initdead 120
#
#
# nice_failback: determines whether a resource will
# automatically fail back to its "primary" node, or remain
# on whatever node is serving it until that node fails.
#
# The default is "off", which means that it WILL fail
# back to the node which is declared as primary in haresources
#
# "on" means that resources only move to new nodes when
# the nodes they are served on die. This is deemed as a
# "nice" behavior (unless you want to do active-active).
#
nice_failback on
#
# hopfudge maximum hop count minus number of nodes in config
#hopfudge 1
#
#
# Baud rate for serial ports...
# (must precede "serial" directives)
#
#baud 19200
#
# serial serialportname ...
#serial /dev/ttyS0 # Linux
#serial /dev/cuaa0 # FreeBSD
#serial /dev/cua/a # Solaris
#
# What UDP port to use for communication?
# [used by bcast and ucast]
#
#udpport 694
#
# What interfaces to broadcast heartbeats over?
#
#bcast eth0 # Linux
#bcast eth1 eth2 # Linux
#bcast le0 # Solaris
#bcast le1 le2 # Solaris
#
# Set up a multicast heartbeat medium
# mcast [dev] [mcast group] [port] [ttl] [loop]
#
# [dev] device to send/rcv heartbeats on
# [mcast group] multicast group to join (class D multicast address
# 224.0.0.0 - 239.255.255.255)
# [port] udp port to sendto/rcvfrom (no reason to differ
# from the port used for broadcast heartbeats)
# [ttl] the ttl value for outbound heartbeats. This affects
# how far the multicast packet will propagate. (1-255)
# [loop] toggles loopback for outbound multicast heartbeats.
# if enabled, an outbound packet will be looped back and
# received by the interface it was sent on. (0 or 1)
# This field should always be set to 0.
#
#
mcast eth0 225.0.0.1 694 1 0
#
# Set up a unicast / udp heartbeat medium
# ucast [dev] [peer-ip-addr]
#
# [dev] device to send/rcv heartbeats on
# [peer-ip-addr] IP address of peer to send packets to
#
#ucast eth0 192.168.1.2
#
#
# Watchdog is the watchdog timer. If our own heart doesn't beat for
# a minute, then our machine will reboot.
#
#watchdog /dev/watchdog
#
# "Legacy" STONITH support
# Using this directive assumes that there is one stonith
# device in the cluster. Parameters to this device are
# read from a configuration file. The format of this line is:
#
# stonith <stonith_type> <configfile>
#
# NOTE: it is up to you to maintain this file on each node in the
# cluster!
#
#stonith baytech /etc/ha.d/conf/stonith.baytech
#
# STONITH support
# You can configure multiple stonith devices using this directive.
# The format of the line is:
# stonith_host <hostfrom> <stonith_type> <params...>
# <hostfrom> is the machine the stonith device is attached
# to or * to mean it is accessible from any host.
# <stonith_type> is the type of stonith device (a list of
# supported drives is in /usr/lib/stonith.)
# <params...> are driver specific parameters. To see the
# format for a particular device, run:
# stonith -l -t <stonith_type>
#
#
# Note that if you put your stonith device access information in
# here, and you make this file publically readable, you're asking
# for a denial of service attack ;-)
#
#
#stonith_host * baytech 10.0.0.3 mylogin mysecretpassword
#stonith_host ken3 rps10 /dev/ttyS1 kathy 0
#stonith_host kathy rps10 /dev/ttyS1 ken3 0
#
# Tell what machines are in the cluster
# node nodename ... -- must match uname -n
#node ken3
#node kathy
node loadbalancer.com
node backup.com
#
# Less common options...
#
# Treats 10.10.10.254 as a pseudo-cluster-member
#
#ping 10.10.10.254
#
# Started and stopped with heartbeat. Restarted unless it exits
# with rc=100
#
#respawn userid /path/name/to/run
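Of all the heartbeat media discussed above, only the mcast line is active in this setup, so both directors must use the same group and port to hear each other. Once heartbeat is running, the packets are easy to watch (a hedged diagnostic, assuming tcpdump is installed):
tcpdump -ni eth0 udp port 694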
24)vi /etc/ha.d/haresources
loadbalancer.com lvs IPaddr::192.168.1.50/24/eth0 ipvsadm ldirectord
#backup.com lvs IPaddr::192.168.1.50/24/eth0 ipvsadm ldirectord
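heartbeat starts the resources on this line left to right on takeover and stops them right to left on release; the leading node name marks the preferred owner. In effect, becoming the active director runs roughly the following (an illustration of the mechanism, not literal commands from the original):
/etc/ha.d/resource.d/lvs start
/etc/ha.d/resource.d/IPaddr 192.168.1.50/24/eth0 start
/etc/init.d/ipvsadm start
/etc/init.d/ldirectord start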
25)vi /etc/ha.d/authkeys
#auth 1
auth 3
#1 crc
#2 sha1 HI!
3 md5 Hello!
26)chmod 600 /etc/ha.d/authkeys
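ha.cf, haresources, and authkeys must be identical on both directors (ldirectord.cf too, once edited below). A minimal sketch for propagating them, assuming root SSH between the nodes:
scp -p /etc/ha.d/ha.cf /etc/ha.d/haresources /etc/ha.d/authkeys backup.com:/etc/ha.d/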
27)vi /etc/ha.d/ldirectord.cf (directives belonging to a virtual= section must be indented below it, or ldirectord will not associate them with that service)
# Global Directives
checktimeout=3
checkinterval=1
fallback=127.0.0.1:80
autoreload=yes
logfile="/var/log/ldirectord.log"
#logfile="local0"
quiescent=yes
# A sample virtual service with a fallback that will override the global setting
virtual=192.168.1.50:80
        real=192.168.1.51:80 ipip
        real=192.168.1.52:80 ipip
        real=192.168.1.251:80 ipip
        real=192.168.1.252:80 ipip
        fallback=127.0.0.1:80 gate
        service=http
        request="real-server.html"
        receive="newsbook"
        scheduler=rr
        #persistent=600
        #netmask=255.255.255.255
        protocol=tcp
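ldirectord decides health by fetching the request= page from each real server and searching the response for the receive= string; with quiescent=yes, a failed server stays in the LVS table with weight 0 rather than being removed. The same check can be reproduced by hand (a hedged example, assuming wget is available on the director):
wget -qO- http://192.168.1.251/real-server.html | grep newsbook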
28)vi /etc/ha.d/resource.d/lvs
#!/bin/sh
# chkconfig: 2345 90 10
# description: Preparing for Load Balancer and Real Server switching
VIP=192.168.1.50
. /etc/rc.d/init.d/functions
case "$1" in
start)
echo "Preparing for Load Balancer"
ifconfig tunl0 down
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 0 > /proc/sys/net/ipv4/conf/all/hidden
;;
stop)
echo "Preparing for Real Server"
ifconfig tunl0 $VIP netmask 255.255.255.255 broadcast $VIP up
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 1 > /proc/sys/net/ipv4/conf/all/hidden
echo 1 > /proc/sys/net/ipv4/conf/tunl0/hidden
;;
*)
echo "Usage: lvs {start|stop}"
exit 1
esac
29)chmod 755 /etc/ha.d/resource.d/lvs
30)/etc/init.d/heartbeat start
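This lvs resource script is what lets the two directors double as real servers: while heartbeat holds the resource, the node acts as the load balancer; when heartbeat releases it, the node reverts to real-server mode with the VIP hidden on tunl0. Both branches can be exercised by hand as a sanity check (heartbeat normally drives this, so do it before joining the cluster):
/etc/ha.d/resource.d/lvs stop     # switch to real-server mode
ifconfig tunl0                    # should show 192.168.1.50 up
/etc/ha.d/resource.d/lvs start    # back to director mode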
real server setup (both real servers)
1)vi /etc/init.d/tunl
#!/bin/sh
# chkconfig: 2345 70 10
# description: Config tunl port and apply arp patch
VIP=192.168.1.50
. /etc/rc.d/init.d/functions
case "$1" in
start)
echo "Tunl port starting"
ifconfig tunl0 $VIP netmask 255.255.255.255 broadcast $VIP up
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 1 > /proc/sys/net/ipv4/conf/all/hidden
echo 1 > /proc/sys/net/ipv4/conf/tunl0/hidden
;;
stop)
echo "Tunl port closing"
ifconfig tunl0 down
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 0 > /proc/sys/net/ipv4/conf/all/hidden
;;
*)
echo "Usage: $0 {start|stop}"
exit 1
esac
2)chmod 755 /etc/init.d/tunl
3)chkconfig --add tunl
4)chkconfig tunl on
5)add the test pages on each real server
index.html (served to clients), e.g.:
Test Page
real-server.html (fetched by the ldirectord health check configured above) must contain the string:
newsbook
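With heartbeat running on both directors and tunl started on both real servers, the cluster can be checked end to end. The hidden flag keeps the real servers from answering ARP for the VIP, so all client traffic enters through the active director, which tunnels it to whichever real server round-robin picks. A hedged final test, run first from a client outside the cluster and then on the active director:
for i in 1 2 3 4; do wget -qO- http://192.168.1.50/; done
ipvsadm -L -n --stats     # per-real-server packet counters should increase evenly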