A colleague asked me: with three Redis Sentinel nodes, if two of them go down, can you still access the data in Redis normally through Sentinel? I thought about it and figured it should work in theory, but I had never actually tried it, so I set up a test today. Building the environment and the test code took a few detours; the configuration is listed below. The virtual machine runs CentOS 7, Docker uses the stock redis image (redis_version:7.0.11), and the client side is Python 3.
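Before the setup, a quick note on why "in theory yes": a Sentinel-aware client is configured with the full list of sentinel addresses and asks whichever one answers for the current master, so a single surviving sentinel is enough for discovery. Below is a minimal sketch of that idea using redis-py's Sentinel helper; the host and ports simply match the test environment described later.

from redis.sentinel import Sentinel

# All three sentinel addresses; the client tries them in turn until one responds.
sentinels = Sentinel([('10.4.7.124', 26379),
                      ('10.4.7.124', 26380),
                      ('10.4.7.124', 26381)],
                     socket_timeout=0.5)

# Ask whichever sentinel is reachable for the current master of "mymaster".
# This keeps working as long as at least one sentinel is still alive.
print(sentinels.discover_master('mymaster'))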
Using Docker Compose to start the Redis nodes and the Sentinel cluster is the quickest way to bring everything up.
docker-compose.yml
version: "3.0"
services:
  master:
    image: redis
    container_name: redis-master
    ports:
      - "6379:6379"
    # Do not use host network mode here, it causes problems; use bridge
    network_mode: bridge
    command: redis-server /usr/local/etc/redis/redis.conf
    volumes:
      - ./redis1.conf:/usr/local/etc/redis/redis.conf
  slave1:
    image: redis
    container_name: redis-slave-1
    ports:
      - "6380:6380"
    network_mode: bridge
    command: redis-server /usr/local/etc/redis/redis.conf --slaveof 10.4.7.124 6379
    volumes:
      - ./redis2.conf:/usr/local/etc/redis/redis.conf
  slave2:
    image: redis
    container_name: redis-slave-2
    ports:
      - "6381:6381"
    network_mode: bridge
    command: redis-server /usr/local/etc/redis/redis.conf --slaveof 10.4.7.124 6379
    volumes:
      - ./redis3.conf:/usr/local/etc/redis/redis.conf
  sentinel1:
    image: redis
    container_name: redis-sentinel-1
    ports:
      - "26379:26379"
    network_mode: bridge
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    volumes:
      - ./sentinel1.conf:/usr/local/etc/redis/sentinel.conf
  sentinel2:
    image: redis
    container_name: redis-sentinel-2
    ports:
      - "26380:26380"
    network_mode: bridge
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    volumes:
      - ./sentinel2.conf:/usr/local/etc/redis/sentinel.conf
  sentinel3:
    image: redis
    container_name: redis-sentinel-3
    ports:
      - "26381:26381"
    network_mode: bridge
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    volumes:
      - ./sentinel3.conf:/usr/local/etc/redis/sentinel.conf

redis1.conf
bind 0.0.0.0
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename appendonly.aof
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes

redis2.conf
bind 0.0.0.0
# This must be set to no, otherwise it is hard to connect for testing
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6380.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename appendonly.aof
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes

redis3.conf
bind 0.0.0.0
protected-mode no
port 6381
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6381.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename appendonly.aof
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes

sentinel1.conf
port 26379
sentinel monitor mymaster 10.4.7.124 6379 2
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1
sentinel2.conf
port 26380
sentinel monitor mymaster 10.4.7.124 6379 2
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1

sentinel3.conf
port 26381
sentinel monitor mymaster 10.4.7.124 6379 2
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1
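The trailing 2 in each "sentinel monitor" line is the quorum: at least two sentinels must agree that the master is down before a failover can be started. Client-side master discovery is a separate mechanism and only needs one reachable sentinel, which is exactly what this article tests. Once the stack is running (next step), a quick check of what each sentinel would tell a client might look like the following sketch; it assumes the host IP and ports from the configuration above and uses redis-py's plain client.

import redis

# Ask every sentinel individually which address it currently hands out for "mymaster".
for port in (26379, 26380, 26381):
    s = redis.Redis(host='10.4.7.124', port=port, socket_timeout=0.5)
    try:
        print(port, s.sentinel_get_master_addr_by_name('mymaster'))
    except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):
        print(port, 'sentinel not reachable')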
Start all the services

docker-compose up -d
docker compose up -d

[root@docker_124 sentinel]# docker compose up -d
[+] Running 6/6
 ✔ Container redis-slave-2     Started  0.1s
 ✔ Container redis-sentinel-1  Started  0.1s
 ✔ Container redis-sentinel-2  Started  0.1s
 ✔ Container redis-sentinel-3  Started  0.1s
 ✔ Container redis-master      Started  0.1s
 ✔ Container redis-slave-1     Started

Stop all the services
docker-compose down
Once the cluster is up, check that it started correctly:
[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 6379 info replication
# Replication
role:master
connected_slaves:2
slave0:ip=172.17.0.1,port=6380,state=online,offset=170743,lag=1
slave1:ip=172.17.0.1,port=6381,state=online,offset=170743,lag=0
master_failover_state:no-failover
master_replid:c85254ee815510a442514fa73aeba46fc8fb018f
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:170743
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:170743

[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 6380
[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 6381
[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 26380
[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 26381
[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 26379
10.4.7.124:26379> sentinel master mymaster
 1) "name"
 2) "mymaster"
 3) "ip"
 4) "10.4.7.124"
 5) "port"
 6) "6379"
 7) "runid"
 8) "8c21a5f4a8fdcabb3677603800a66a2b41c62f3c"
 9) "flags"
10) "master"
...(output truncated)...

[root@docker_124 sentinel]# docker run -it --rm redis redis-cli -h 10.4.7.124 -p 26379 sentinel slaves mymaster
1)  1) "name"
    2) "172.17.0.1:6381"
    3) "ip"
    4) "172.17.0.1"
    5) "port"
    6) "6381"
    7) "runid"
    8) "10053461cfeedbf53aec9f0afa66570f079483f6"
    9) "flags"
   10) "slave"
...(output truncated)...
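The same checks can also be done from Python before running the test loop. The short sketch below is just a convenience, not part of the original test; it uses redis-py against the addresses shown above.

import redis

# Replication view as reported by the master itself
master = redis.Redis(host='10.4.7.124', port=6379, socket_timeout=0.5)
info = master.info('replication')
print(info['role'], 'connected_slaves =', info['connected_slaves'])

# The first sentinel's view of the same master
sentinel = redis.Redis(host='10.4.7.124', port=26379, socket_timeout=0.5)
print(sentinel.sentinel_master('mymaster'))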
Once the environment is up and running, you can run the Python test script.

The test Python script
from redis.sentinel import Sentinel
import time
import random

# Generate a random string
def generate_random_str(randomlength=16):
    random_str = ''
    base_str = 'ABCDEFGHIGKLMNOPQRSTUVWXYZabcdefghigklmnopqrstuvwxyz0123456789'
    length = len(base_str) - 1
    for i in range(randomlength):
        random_str += base_str[random.randint(0, length)]
    return random_str

# Connect to Redis Sentinel
sentinel = Sentinel([('10.4.7.124', 26379),
                     ('10.4.7.124', 26380),
                     ('10.4.7.124', 26381)],
                    socket_timeout=0.5)

# Get the master server address
master = sentinel.master_for('mymaster', socket_timeout=0.5, db=0)
w_ret = master.set('foo', 'bar')
print(master)
# Output: (192.168.31.87, 26379)

# Get the slave server address
slave = sentinel.slave_for('mymaster', socket_timeout=0.5, db=0)
print(slave)
# Output: [(192.168.31.88, 26379)]

while True:
    print('master:', master.get('foo'))
    # Write random data to Redis through the master connection
    master.set('foo', generate_random_str(10))
    print('sleep 1s')
    time.sleep(1)
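To reproduce the experiment, start the script and then stop two of the sentinel containers (for example redis-sentinel-1 and redis-sentinel-2 from the compose file above); the loop keeps reading and writing through the surviving sentinel. One caveat: with only one sentinel left, the quorum of 2 can no longer be reached, so automatic failover would not happen if the Redis master itself died; only master discovery keeps working. If the transient connection errors that appear while containers are being stopped are a nuisance, the loop can be hardened slightly, as in the sketch below, which reuses sentinel, master and generate_random_str from the script above and is not part of the original test.

import redis

while True:
    try:
        # Re-resolve the master through whichever sentinel still answers
        print('current master:', sentinel.discover_master('mymaster'))
        print('master:', master.get('foo'))
        master.set('foo', generate_random_str(10))
    except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
        print('temporary error, retrying:', e)
    print('sleep 1s')
    time.sleep(1)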
After the Python program starts, it queries the sentinel addresses every second, obtains a Redis connection and sets a value. I then stopped two of the sentinel nodes (including the master node), and the script could still set values normally; reading on the Redis master node returned the most recently written value. So with two sentinel nodes stopped, the setup is still usable.

References

Building a redis-sentinel test environment with docker-compose: https://www.cnblogs.com/leffss/p/12082361.html
Connecting to a Redis Sentinel cluster from Python 3: https://blog.csdn.net/u012887259/article/details/102425691
Fixing "WARNING: Published ports are discarded when using host network mode": https://blog.csdn.net/zengNLP/article/details/127220244

Installing Docker Compose (after installing it I can only use "docker compose", not "docker-compose"; I am not sure why):

yum -y install docker-compose-plugin

https://blog.csdn.net/pushiqiang/article/details/78682323
https://zhuanlan.zhihu.com/p/570108654