docker run -d \
--name redis-node-1 \
--net host \
--privileged=true \
-v /dockerData/redis-cluster/redis-node-1:/data redis:6.0.8 \
--cluster-enabled yes --appendonly yes --port 6381
docker run -d \
--name redis-node-2 \
--net host \
--privileged=true \
-v /dockerData/redis-cluster/redis-node-2:/data redis:6.0.8 \
--cluster-enabled yes --appendonly yes --port 6382
docker run -d \
--name redis-node-3 \
--net host \
--privileged=true \
-v /dockerData/redis-cluster/redis-node-3:/data redis:6.0.8 \
--cluster-enabled yes --appendonly yes --port 6383
docker run -d \
--name redis-node-4 \
--net host \
--privileged=true \
-v /dockerData/redis-cluster/redis-node-4:/data redis:6.0.8 \
--cluster-enabled yes --appendonly yes --port 6384
docker run -d \
--name redis-node-5 \
--net host \
--privileged=true \
-v /dockerData/redis-cluster/redis-node-5:/data redis:6.0.8 \
--cluster-enabled yes --appendonly yes --port 6385
docker run -d \
--name redis-node-6 \
--net host \
--privileged=true \
-v /dockerData/redis-cluster/redis-node-6:/data redis:6.0.8 \
--cluster-enabled yes --appendonly yes --port 6386
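The six commands above differ only in the container name, host data directory, and port, so the same containers can be produced with a small loop. A minimal sketch, assuming the same host paths, image tag, and 6381-6386 port numbering used above:

# Hypothetical loop equivalent to the six docker run commands above.
for i in 1 2 3 4 5 6; do
  docker run -d \
    --name redis-node-$i \
    --net host \
    --privileged=true \
    -v /dockerData/redis-cluster/redis-node-$i:/data \
    redis:6.0.8 \
    --cluster-enabled yes --appendonly yes --port 638$i
done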
[root@itogge ~]# docker exec -it redis-node-1 /bin/bash
root@itogge:/data#
root@itogge:/data# redis-cli --cluster create 192.168.31.150:6381 192.168.31.150:6382 \
> 192.168.31.150:6383 192.168.31.150:6384 \
> 192.168.31.150:6385 192.168.31.150:6386 \
> --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.31.150:6385 to 192.168.31.150:6381
Adding replica 192.168.31.150:6386 to 192.168.31.150:6382
Adding replica 192.168.31.150:6384 to 192.168.31.150:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381
slots:[0-5460] (5461 slots) master
M: 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382
slots:[5461-10922] (5462 slots) master
M: e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383
slots:[10923-16383] (5461 slots) master
S: 683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384
replicates 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
S: 85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385
replicates 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5
S: 4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386
replicates e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.31.150:6381)
M: 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384
slots: (0 slots) slave
replicates 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
M: e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386
slots: (0 slots) slave
replicates e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27
S: 85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385
slots: (0 slots) slave
replicates 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@itogge:/data#
root@itogge:/data# redis-cli -p 6381
127.0.0.1:6381> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:130
cluster_stats_messages_pong_sent:141
cluster_stats_messages_sent:271
cluster_stats_messages_ping_received:136
cluster_stats_messages_pong_received:130
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:271
127.0.0.1:6381> cluster nodes
683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384@16384 slave 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 0 1656933870322 1 connected
e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383@16383 master - 0 1656933871345 3 connected 10923-16383
10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382@16382 master - 0 1656933868267 2 connected 5461-10922
78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381@16381 myself,master - 0 1656933867000 1 connected 0-5460
4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386@16386 slave e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 0 1656933870000 3 connected
85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385@16385 slave 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 0 1656933869000 2 connected
127.0.0.1:6381>
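Note that the plain redis-cli -p 6381 connection above is not cluster-aware: a write whose key hashes to a slot owned by another master fails with a MOVED error. Adding -c makes the client follow redirections. A minimal sketch (key names are arbitrary examples):

# Connect in cluster mode so the client follows MOVED/ASK redirections.
redis-cli -p 6381 -c
# Inside the prompt, a key that hashes to another master's slot range is
# redirected there transparently, e.g.:
#   set k1 v1
#   get k1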
PS: If a master goes down, its replica is automatically promoted to master. When the old master rejoins the cluster, it comes back as a replica of the new master; to restore it to the master role, the promoted replica has to be taken offline and rejoined in turn. A quick way to observe this is sketched below.
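A minimal sketch of that failover behavior, assuming the container names and ports from above (6384 is the replica of 6381 in this cluster):

# Stop the master on 6381; after the cluster node timeout its replica on 6384
# should be promoted to master.
docker stop redis-node-1
redis-cli -p 6382 cluster nodes    # observe the role change from a surviving node
# Bring the old master back; it rejoins as a replica of 6384.
docker start redis-node-1
redis-cli -p 6381 cluster nodes
# Per the note above, to swap the roles back, stop and restart redis-node-4
# (the current master) in the same way.
docker stop redis-node-4
docker start redis-node-4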
[root@itogge ~]# docker run -d \
> --name redis-node-7 \
> --net host \
> --privileged=true \
> -v /dockerData/redis-cluster/redis-node-7:/data redis:6.0.8 \
> --cluster-enabled yes --appendonly yes --port 6387
526d98440b7632409b9976e965ed02d30954bb152ab95696d70f7072254f565b
[root@itogge ~]# docker run -d \
> --name redis-node-8 \
> --net host \
> --privileged=true \
> -v /dockerData/redis-cluster/redis-node-8:/data redis:6.0.8 \
> --cluster-enabled yes --appendonly yes --port 6388
d34479e765c556766cebebc5c3bda87a9680dbf6dc5f548723a30b67b8ccdebe
[root@itogge ~]#
[root@itogge redis-cluster]# docker exec -it redis-node-7 /bin/bash
root@itogge:/data#
root@itogge:/data# redis-cli --cluster add-node 192.168.31.150:6387 192.168.31.150:6381
>>> Adding node 192.168.31.150:6387 to cluster 192.168.31.150:6381
>>> Performing Cluster Check (using node 192.168.31.150:6381)
M: 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384
slots: (0 slots) slave
replicates 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
M: e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386
slots: (0 slots) slave
replicates e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27
S: 85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385
slots: (0 slots) slave
replicates 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.31.150:6387 to make it join the cluster.
[OK] New node added correctly.
root@itogge:/data#
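Before resharding, it can be worth confirming that 6387 joined as an empty master. A minimal check, using the standard check subcommand against any node already in the cluster:

# The new node should appear as a master with 0 slots until slots are moved to it.
redis-cli --cluster check 192.168.31.150:6381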
root@itogge:/data# redis-cli --cluster reshard 192.168.31.150:6381
>>> Performing Cluster Check (using node 192.168.31.150:6381)
M: 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384
slots: (0 slots) slave
replicates 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
M: e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd 192.168.31.150:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386
slots: (0 slots) slave
replicates e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27
S: 85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385
slots: (0 slots) slave
replicates 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1: all
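The same reshard can also be scripted instead of answered interactively, since redis-cli accepts the answers as options. A sketch, assuming the node ID of 6387 shown above:

# Non-interactive equivalent of the prompts above: move 4096 slots from all
# existing masters to the new node and skip the confirmation prompt.
redis-cli --cluster reshard 192.168.31.150:6381 \
  --cluster-from all \
  --cluster-to f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd \
  --cluster-slots 4096 \
  --cluster-yes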
root@itogge:/data# redis-cli --cluster add-node 192.168.31.150:6388 192.168.31.150:6387 --cluster-slave --cluster-master-id f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd
>>> Adding node 192.168.31.150:6388 to cluster 192.168.31.150:6387
>>> Performing Cluster Check (using node 192.168.31.150:6387)
M: f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd 192.168.31.150:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384
slots: (0 slots) slave
replicates 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
M: 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385
slots: (0 slots) slave
replicates 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5
S: 4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386
slots: (0 slots) slave
replicates e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27
M: e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.31.150:6388 to make it join the cluster.
Waiting for the cluster to join
>>> Configure node as replica of 192.168.31.150:6387.
[OK] New node added correctly.
root@itogge:/data#
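A quick confirmation that 6388 actually replicates 6387, a minimal sketch:

# 6388 should be listed as a slave of f2d0538... with 0 slots of its own.
redis-cli -p 6387 cluster nodes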
root@itogge:/data# redis-cli --cluster del-node 192.168.31.150:6388 690001803b15253522c8b273b7e5d556ffd6ed5b
>>> Removing node 690001803b15253522c8b273b7e5d556ffd6ed5b from cluster 192.168.31.150:6388
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@itogge:/data#
root@itogge:/data# redis-cli --cluster reshard 192.168.31.150:6381
>>> Performing Cluster Check (using node 192.168.31.150:6381)
M: 78113a8ab6b5b7091d855e0a0e3329c6e07023cb 192.168.31.150:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 683f1c89ee11c14fb8a1d37bed7bda7d56a05a24 192.168.31.150:6384
slots: (0 slots) slave
replicates 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
M: e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27 192.168.31.150:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5 192.168.31.150:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd 192.168.31.150:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 4f32bf0a533339c48688daf14930c7d55d2eac78 192.168.31.150:6386
slots: (0 slots) slave
replicates e40a581a6db233f9439b0dbc9ca2e8ad1fa7ce27
S: 85a681bf05e375d99005cc44a39409a711fa904b 192.168.31.150:6385
slots: (0 slots) slave
replicates 10a1b4e6f2469f3f7356d8ca870f061c13bd5fe5
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? 78113a8ab6b5b7091d855e0a0e3329c6e07023cb
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1: f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd
Source node #2: done
PS: The prompts ask how many slots to take out, which node receives the extracted slots, and which node(s) the slots are taken from; the reshard can be run multiple times to redistribute the slots in several passes.
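A master can only be removed once it owns zero slots, which is why the order here matters: remove the replica 6388 first, move 6387's slots away, then remove 6387. A minimal check before the final del-node, assuming the addresses above:

# 6387 should now report 0 slots; del-node refuses a master that still holds slots.
redis-cli --cluster check 192.168.31.150:6381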
root@itogge:/data# redis-cli --cluster del-node 192.168.31.150:6387 f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd
>>> Removing node f2d0538bf5056ce90e54ed0ccd1e30a5bf4081dd from cluster 192.168.31.150:6387
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@itogge:/data#
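Once 6387 has been forgotten by the cluster, the two scale-out containers are no longer needed and can be cleaned up from the host. A minimal sketch:

# Run from the host (not inside a container): stop and remove the extra nodes.
docker stop redis-node-7 redis-node-8
docker rm redis-node-7 redis-node-8
# Sanity check from inside any surviving node's container. Note that after the
# single reshard above, 6381 holds 8192 slots while 6382 and 6383 hold 4096 each;
# further reshard passes (or redis-cli --cluster rebalance) can even this out.
redis-cli --cluster check 192.168.31.150:6381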