Percona-XtraDB-Cluster+Haproxy 搭建集群环境
环境准备及服务器信息:
配置防火墙
firewall-cmd --add-port=3306/tcp --permanent
firewall-cmd --add-port=4567/tcp --permanent
firewall-cmd --add-port=4568/tcp --permanent
firewall-cmd --add-port=4444/tcp --permanent
firewall-cmd --reload
安装官方yum源repo配置文件
yum install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm
安装pxc
yum -y install Percona-XtraDB-Cluster-57
yum install Percona-Server-client-57
创建用户和组
groupadd mysql
useradd -g mysql -s /sbin/nologin mysql
创建目录并赋权
mkdir /data/mysql/{data,binlog,slow,logs} -p
touch /data/mysql/logs/mysqld.log
chown -R mysql:mysql /data/mysql
修改/etc/my.cnf配置文件
vi /etc/my.cnf
增加[mysqld] ##已知BUG
vi /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
替换为:
[client]
port =
socket = /data/mysql/data/mysql.sock
default-character-set = utf8mb4
[mysqld]
# basic settings #
user = mysql
port=
sql_mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER"
autocommit =
server-id=
character_set_server=utf8mb4
init_connect='SET NAMES utf8'
transaction_isolation = READ-COMMITTED
lower_case_table_names =
explicit_defaults_for_timestamp =
max_allowed_packet =
event_scheduler =
datadir = /data/mysql/data
basedir = /var/lib/mysql
pid-file = /data/mysql/data/mysqld.pid
socket = /data/mysql/data/mysql.sock
default-time_zone = '+8:00'
# connection #
interactive_timeout =
wait_timeout =
lock_wait_timeout =
skip_name_resolve =
max_connections =
max_connect_errors =
# table cache performance settings
table_open_cache =
table_definition_cache =
table_open_cache_instances =
# session memory settings #
read_buffer_size = 5M
read_rnd_buffer_size = 10M
sort_buffer_size = 10M
tmp_table_size = 25M
join_buffer_size = 40M
thread_cache_size = 20M
# log settings #
log_error = /data/mysql/logs/mysqld.log
slow_query_log_file = /data/mysql/slow/slow.log
log-bin= /data/mysql/binlog/mysql-bin
relay_log = mysql-relay-bin
general_log_file= general.log
slow_query_log =
log_queries_not_using_indexes =
log_slow_admin_statements =
log_slow_slave_statements =
log_throttle_queries_not_using_indexes =
long_query_time =
min_examined_row_limit =
binlog-rows-query-log-events =
log-bin-trust-function-creators =
expire-logs-days =
log-slave-updates =
# innodb settings #
innodb_page_size =
innodb_buffer_pool_size = 256M
innodb_buffer_pool_instances =
innodb_buffer_pool_load_at_startup =
innodb_buffer_pool_dump_at_shutdown =
innodb_lru_scan_depth =
innodb_lock_wait_timeout =
innodb_io_capacity =
innodb_io_capacity_max =
innodb_flush_method = O_DIRECT
innodb_file_format = Barracuda
innodb_file_format_max = Barracuda
#undo
innodb_undo_directory = /data/mysql/data
innodb_undo_logs =
innodb_undo_tablespaces =
#redo
innodb_log_group_home_dir = /data/mysql/data
innodb_log_file_size = 10M
innodb_log_files_in_group =
innodb_flush_neighbors =
innodb_log_buffer_size =
innodb_purge_threads =
innodb_large_prefix =
innodb_thread_concurrency =
innodb_print_all_deadlocks =
innodb_strict_mode =
innodb_sort_buffer_size =
innodb_write_io_threads =
innodb_read_io_threads =
innodb_file_per_table =
innodb_stats_persistent_sample_pages =
innodb_autoinc_lock_mode =
innodb_online_alter_log_max_size=100M
innodb_open_files=
# replication settings #
master_info_repository = TABLE
relay_log_info_repository = TABLE
sync_binlog =
gtid_mode = on
enforce_gtid_consistency =
log_slave_updates
binlog_format = ROW
binlog_rows_query_log_events =
relay_log = relay.log
relay_log_recovery =
slave_skip_errors = ddl_exist_errors
slave-rows-search-algorithms = 'INDEX_SCAN,HASH_SCAN'
[mysqld-5.6]
# metalock performance settings
metadata_locks_hash_instances=
[mysqld-5.7]
# new innodb settings #
loose_innodb_numa_interleave=
innodb_buffer_pool_dump_pct =
innodb_page_cleaners =
innodb_undo_log_truncate =
innodb_max_undo_log_size = 100M #2G
innodb_purge_rseg_truncate_frequency =
# new replication settings #
slave-parallel-type = LOGICAL_CLOCK
slave-parallel-workers =
slave_preserve_commit_order=
slave_transaction_retries=
# other change settings #
binlog_gtid_simple_recovery=
log_timestamps=system
show_compatibility_56=on
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=
vi /etc/percona-xtradb-cluster.conf.d/mysqld_safe.cnf
替换pid-file、socket为:
pid-file = /data/mysql/data/mysqld.pid
socket = /data/mysql/data/mysql.sock
vi /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
修改:
wsrep_cluster_address=gcomm://192.168.253.28,192.168.253.29,192.168.253.30 ##根据实际修改
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_node_name =pxc-linux-29 ##根据实际修改
wsrep_node_address=192.168.253.29 ##根据实际修改
wsrep_sst_method=xtrabackup-v2
wsrep_cluster_name=test-pxc
wsrep_sst_auth="sstuser:s3cretPass" ##根据实际修改
第一个节点启动
systemctl start mysql@bootstrap
备注:谨记,只要是启动集群的第一个Node(首次搭建集群或者集群全部关闭),都要用此命令
修改密码
mysql5.7版本日志均在error.log 里面生成
grep "temporary password" /data/mysql/logs/mysqld.log
使用该密码登陆MySQL,修改成自己想要的密码
mysql> alter user 'root'@'localhost' identified by 'abc123';
创建SST同步用户
mysql> GRANT PROCESS,RELOAD,LOCK TABLES,REPLICATION CLIENT ON *.* TO 'sstuser'@'192.168.%.%' IDENTIFIED BY 's3cretPass';
mysql> flush privileges;
其它节点启动
systemctl start mysql
备注:谨记,只要集群有一个Node启动,其余节点都是用此命令
Haproxy负载均衡
Haproxy是一个反向代理负载均衡解决方案,支持4层和7层模式,提供后端服务器健康检查,非常稳定。淘宝前期也使用Haproxy作为CDN系统负载均衡器
安装haproxy
yum -y install haproxy
在cluster的MySQL上创建用户(一个节点创建,会被复制到其它节点)
监控用帐号:
grant usage on *.* to 'pxc-monitor'@'%' identified by 'testpxc';
服务测试帐号:
grant all privileges on *.* to 'zxw'@'%' identified by 'xxwzopop';
配置haproxy.cfg
vi /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn
user haproxy
group haproxy
daemon
defaults
mode tcp
log global
option tcplog
option dontlognull
retries
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn
frontend mysql
bind *:
mode tcp
#log global
option tcplog
default_backend mysqlservers
backend mysqlservers
balance leastconn
server dbsrv1 10.10.48.62: check port rise fall maxconn
server dbsrv2 10.10.48.64: check port rise fall maxconn
server dbsrv2 10.10.48.66: check port rise fall maxconn
## 定义一个监控页面,监听在1080端口,并启用了验证机制
listen stats
mode http
bind 0.0.0.0:
stats enable
stats hide-version
stats uri /haproxyadmin?stats
stats realm Haproxy\ Statistics
stats auth admin:admin
stats admin if TRUE
配置haproxy的日志:
安装完HAProxy后,默认情况下,HAProxy为了节省读写IO所消耗的性能,默认情况下没有日志输出,一下是开启日志的过程
yum -y install rsyslog
# vim /etc/rsyslog.conf
#
#
...........
$ModLoad imudp
$UDPServerRun 514 //rsyslog 需要在514端口监听UDP,这两行默认是被注释掉的,需要取消注释才能接收haproxy日志
.........
local2.* /var/log/haproxy.log //和haproxy的配置文件中定义的log level一致
systemctl start rsyslog
在PXC 每个mysql节点安装mysql健康状态检查脚本(需要在pxc的每个节点执行)
脚本拷贝
# cp /usr/local/mysql/bin/clustercheck /usr/bin/
# cp /usr/local/mysql/xinetd.d/mysqlchk /etc/xinetd.d/
ps:clustercheck和脚本都是默认值没有修改
创建mysql用户,用于mysql健康检查(在任一节点即可):
grant process on *.* to 'clustercheckuser'@'localhost' identified by 'clustercheckpassword!';
flush privileges;
ps:如不使用clustercheck中默认用户名和密码,将需要修改clustercheck脚本中的MYSQL_USERNAME和MYSQL_PASSWORD值
更改用户名和密码(三个节点都得修改)
#vim /usr/bin/clustercheck
MYSQL_USERNAME="pxc-monitor"
MYSQL_PASSWORD="testpxc"
更改/etc/services添加mysqlchk的服务端口号:
echo 'mysqlchk 9200/tcp # mysqlchk' >> /etc/services
安装xinetd服务,通过守护进程来管理mysql健康状态检查脚本
yum -y install xinetd ###非常重要
systemctl enable xinetd
systemctl start xinetd
clustercheck脚本测试
/usr/bin/clustercheck
HTTP/1.1 200 OK
Content-Type: text/plain
Connection: close
Content-Length: 40

Percona XtraDB Cluster Node is synced.
ps:要保证状态为200,否则检测不通过,可能是mysql服务不正常,或者环境不对致使haproxy无法使用mysql
haproxy如何侦测 MySQL Server 是否存活,靠着就是 9200 port,透过 Http check 方式,让 HAProxy 知道 PXC 状态
启动haproxy
#启动命令
haproxy -f /etc/haproxy/haproxy.cfg
#检查后台进程
ps -ef |grep haproxy
haproxy 9754 0 0 11:29 ? 00:00:00 haproxy -f /etc/haproxy/haproxy.cfg
root 9823 74 0 11:30 ? 00:00:00 grep --color=auto haproxy
#检查端口情况
netstat -nlap |grep haproxy
tcp 0 0 0.0.0.0:3307 0.0.0.0:* LISTEN 9754/haproxy
tcp 0 0 0.0.0.0:8088 0.0.0.0:* LISTEN 9754/haproxy
udp 0 0 0.0.0.0:59349 0.0.0.0:* 9754/haproxy
unix 2 [ ACC ] STREAM LISTENING 30637572 9754/haproxy /var/lib/haproxy/stats.9753.tmp
#配置开机自启动
# cp /usr/local/sbin/haproxy /usr/sbin/haproxy
cd /opt/soft/haproxy-1.5.3/examples
[root@db169 examples]# cp haproxy.init /etc/init.d/haproxy
[root@db169 examples]# chmod +x /etc/init.d/haproxy
haproxy测试
在mysql pxc创建测试账号: grant all privileges on *.* to 'zxw'@'%' identified by 'xxwzopop';
#for i in `seq 1 10`;do mysql -h 192.168.1.163 -P3307 -uzxw -pxxwzopop -e "select @@hostname;";done 注:其实可以只允许haproxy侧的IP访问即可,因用户通过vip访问mysql集群,haproxy根据调度策略使用自己的ip创建与后端mysql服务器的连接。
查看Haproxy状态:
http://192.168.1.163:8088/haproxyadmin?stats ##与haproxy.cfg中stats uri一致
输入用户密码:stats auth admin:admin ##与haproxy.cfg中stats auth一致
用keepalived实现haproxy 的高可用
安装
yum install -y gcc openssl-devel popt-devel ipvsadm
yum -y install kernel kernel-devel* popt popt-devel libssl-dev libnl libnl-devel openssl openssl-* ipvsadm libnfnetlink-devel
yum install keepalived -y
yum install MySQL-python -y
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
开启防火墙VRRP
#开启vrrp 协议 否则会出现双VIP的情况
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --protocol vrrp -j ACCEPT
firewall-cmd --reload
配置
vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id haproxy_pxc #keepalived组的名称
}
vrrp_script chk_haproxy {
script "/etc/keepalived/check_haproxy.sh"
interval 2
weight 2
}
vrrp_instance VI_HAPROXY {
state MASTER #备份机是BACKUP
#nopreempt #非抢占模式
interface eth0
virtual_router_id 51 #同一集群中该数值要相同,只能从1-255
priority 100 #备库可以90
advert_int 1
authentication {
auth_type PASS #Auth 用密码,但密码不要超过8位
auth_pass 1111
}
track_script {
chk_haproxy
}
virtual_ipaddress {
192.168.1.188/24
}
}
vi /etc/keepalived/check_haproxy.sh
#!/bin/bash
# Keepalived health check: if haproxy is not running, try to restart it;
# if the restart fails, stop keepalived so the VIP fails over to the
# backup node.
A=$(ps -C haproxy --no-header | wc -l)
if [ "$A" -eq 0 ]; then
  /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg
  sleep 3
  if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
    # keepalived was installed via yum on a systemd host (firewall-cmd /
    # systemctl are used throughout); /etc/init.d/keepalived does not
    # exist there, so stop it through systemctl.
    systemctl stop keepalived
  fi
fi
chmod 755 /etc/keepalived/check_haproxy.sh
# 迁移数据
vi /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
replicate-do-db=dbtest
导入昨天全量备份数据
change master to master_host='10.200.22.33',master_port=3306,master_user='repl',master_password='mysql',master_log_file='master-bin.000009',master_log_pos=674;
start slave;
PMM监控:
pmm server:
docker pull percona/pmm-server:latest
mkdir -p /data/pmm_data/opt/prometheus/data
mkdir -p /data/pmm_data/opt/consul-data
mkdir -p /data/pmm_data/var/lib/mysql
mkdir -p /data/pmm_data/var/lib/grafana
docker create -v /data/pmm_data/opt/prometheus/data -v /data/pmm_data/opt/consul-data -v /data/pmm_data/var/lib/mysql -v /data/pmm_data/var/lib/grafana --name pmm-data percona/pmm-server:latest /bin/true
docker run -d \
-e ORCHESTRATOR_ENABLED=true \
-e METRICS_RETENTION=720h \
-e SERVER_USER=admin \
-e SERVER_PASSWORD=abcd.1234 \
-p 8080:80 \
--net staticnet \
--ip 192.168.0.11 \
--volumes-from pmm-data \
--name pmm-server \
--restart always percona/pmm-server:latest
pmm client:
yum -y install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm
yum install pmm-client -y
新加入配置
pmm-admin config --server 10.1.12.114:8080 --server-user admin --server-password abcd.1234
#添加linux监控
pmm-admin add linux:metrics
#创建MySQL监控账号并开启innodb_monitor_enable
GRANT SELECT, PROCESS, SHOW DATABASES, REPLICATION CLIENT ON *.* TO 'pmm'@'10.1.%' IDENTIFIED BY 'L2iLf#eqISQ613u^';
set global innodb_monitor_enable=all;
#添加MySQL监控
pmm-admin add mysql --user pmm --password 'L2iLf#eqISQ613u^' --host 10.1.21.33 --query-source perfschema
#查看配置信息
pmm-admin list
#检查网络
pmm-admin check-network
[linux:metrics] OK, already monitoring this system.
[mysql:metrics] OK, now monitoring MySQL metrics using DSN root:***@unix(/data/mysql/data/mysql.sock)
[mysql:queries] OK, now monitoring MySQL queries from slowlog using DSN root:***@unix(/data/mysql/data/mysql.sock)
前端访问地址:
浏览器输入Server IP : http://10.200.22.33:8881
输入默认的用户名密码:admin
PMM客户端安装(RPM包):
1.下载rpm安装
wget https://www.percona.com/downloads/pmm/1.17.1/binary/redhat/7/x86_64/pmm-client-1.17.1-1.el7.x86_64.rpm
2.安装
rpm -ivh pmm-client-1.17.1-1.el7.x86_64.rpm
3.检查PMM版本
pmm-admin --version
4.开通防火墙,并确认网络环境
firewall-cmd --zone=public --add-port=42000/tcp --permanent
firewall-cmd --zone=public --add-port=42002/tcp --permanent
firewall-cmd --reload
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1.先备份iptables
cp /etc/sysconfig/iptables /var/tmp
2.修改/etc/sysconfig/iptables文件
vi /etc/sysconfig/iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 42000 -j ACCEPT
-A INPUT -p udp -m state --state NEW -m udp --dport 42000 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 42002 -j ACCEPT
-A INPUT -p udp -m state --state NEW -m udp --dport 42002 -j ACCEPT
3.重启防火墙
service iptables restart
5.配置
a.配置Server
pmm-admin config --server 10.1.12.114:8080 --server-user admin --server-password abcd.1234 --client-name=bxjc-m-48-12
b.添加linux监控
pmm-admin add linux:metrics
c.添加MySQL监控
#创建MySQL监控账号并开启innodb_monitor_enable
GRANT SELECT, PROCESS, SHOW DATABASES, REPLICATION CLIENT ON *.* TO 'pmm'@'10.1.%' IDENTIFIED BY 'L2iLf#eqISQ613u^';
set global innodb_monitor_enable=all;
pmm-admin add mysql --user pmm --password 'L2iLf#eqISQ613u^' --host 10.10.48.12 --port=3306 --query-source perfschema
d.检查配置情况
pmm-admin list
pmm-admin check-network