/* Asynchronous replication implementation.
*
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "server.h"
#include "cluster.h"
#include "bio.h"
#include "functions.h"
#include <memory.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/stat.h>
void replicationDiscardCachedMaster(void);
void replicationResurrectCachedMaster(connection *conn);
void replicationSendAck(void);
void replicaPutOnline(client *slave);
void replicaStartCommandStream(client *slave);
int cancelReplicationHandshake(int reconnect);
/* We take a global flag to remember if this instance generated an RDB
* because of replication, so that we can remove the RDB file in case
* the instance is configured to have no persistence. */
int RDBGeneratedByReplication = 0;
/* Call chain used by a replica (slave) to establish master-slave replication:
 * 1. On the replica side, replicationSetMaster() is called to set up replication with the master.
 * 2. After some preparation, replicationSetMaster() calls connectWithMaster(), which is responsible for establishing the connection.
 * 3. connectWithMaster() initiates the connection to the master and registers syncWithMaster() as the callback invoked once the connection is established.
 * 4. syncWithMaster() does the bulk of the handshake work; it is invoked multiple times as an event loop handler during this process.
 * 5. Once the handshake is done, syncWithMaster() calls slaveTryPartialResynchronization() to attempt a partial resync: it sends the PSYNC command to the master and handles the reply. It is also invoked multiple times and is responsible for setting the replid, offset and other parameters used during the sync.
 * 6. Once the PSYNC parameters are settled, syncWithMaster() calls readSyncBulkPayload() to receive and load the RDB file sent by the master.
 * 7. After the RDB file is loaded, the replica keeps receiving the subsequent stream from the master through the redisServer.master structure.
 */
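/* For orientation, the handshake driven by syncWithMaster() exchanges roughly the
 * following messages (a simplified sketch of the PSYNC handshake, not code from
 * this file; the concrete values are illustrative):
 *   replica -> master: PING
 *   replica -> master: REPLCONF listening-port <port>
 *   replica -> master: REPLCONF capa eof capa psync2
 *   replica -> master: PSYNC <replid> <offset>          ("PSYNC ? -1" on a first sync)
 *   master  -> replica: +FULLRESYNC <replid> <offset>   followed by the RDB payload, or
 *                       +CONTINUE [<replid>]            followed by the incremental stream. */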
/* On the master side, replication is mainly served by two functions:
 * syncCommand: handles the SYNC and PSYNC commands sent by replicas to start a sync.
 * replconfCommand: updates the replica configuration information held by the master.
 * The commands these two functions handle are internal Redis commands; the two
 * functions can be seen as the master-side entry points. */
/* --------------------------- Utility functions ---------------------------- */
/* Return the pointer to a string representing the slave ip:listening_port
* pair. Mostly useful for logging, since we want to log a slave using its
* IP address and its listening port which is more clear for the user, for
* example: "Closing connection with replica 10.1.2.3:6380". */
char *replicationGetSlaveName(client *c) {
static char buf[NET_HOST_PORT_STR_LEN];
char ip[NET_IP_STR_LEN];
ip[0] = '\0';
buf[0] = '\0';
if (c->slave_addr ||
connPeerToString(c->conn,ip,sizeof(ip),NULL) != -1)
{
char *addr = c->slave_addr ? c->slave_addr : ip;
if (c->slave_listening_port)
anetFormatAddr(buf,sizeof(buf),addr,c->slave_listening_port);
else
snprintf(buf,sizeof(buf),"%s:<unknown-replica-port>",addr);
} else {
snprintf(buf,sizeof(buf),"client id #%llu",
(unsigned long long) c->id);
}
return buf;
}
/* Plain unlink() can block for quite some time in order to actually apply
* the file deletion to the filesystem. This call removes the file in a
* background thread instead. We actually just do close() in the thread,
* by using the fact that if there is another instance of the same file open,
* the foreground unlink() will only remove the fs name, and deleting the
* file's storage space will only happen once the last reference is lost. */
int bg_unlink(const char *filename) {
int fd = open(filename,O_RDONLY|O_NONBLOCK);
if (fd == -1) {
/* Can't open the file? Fall back to unlinking in the main thread. */
return unlink(filename);
} else {
/* The following unlink() removes the name but doesn't free the
* file contents because a process still has it open. */
int retval = unlink(filename);
if (retval == -1) {
/* If we got an unlink error, we just return it, closing the
* new reference we have to the file. */
int old_errno = errno;
close(fd); /* This would overwrite our errno. So we saved it. */
errno = old_errno;
return -1;
}
bioCreateCloseJob(fd, 0);
return 0; /* Success. */
}
}
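/* A minimal usage sketch (hypothetical caller, not part of the original file):
 * removing a leftover temporary RDB file without blocking the main thread.
 * The function and file names below are assumptions for illustration only. */
#if 0
static void removeTempRdbExample(const char *tmpfile) {
    /* bg_unlink() falls back to a plain unlink() if the file can't be opened;
     * otherwise the close (and thus the actual deletion) happens in a bio thread. */
    if (bg_unlink(tmpfile) == -1 && errno != ENOENT)
        serverLog(LL_WARNING, "Failed to remove temp RDB '%s': %s",
                  tmpfile, strerror(errno));
}
#endif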
/* ---------------------------------- MASTER -------------------------------- */
/* Create the replication backlog. */
void createReplicationBacklog(void) {
serverAssert(server.repl_backlog == NULL);
server.repl_backlog = zmalloc(sizeof(replBacklog));
server.repl_backlog->ref_repl_buf_node = NULL;
server.repl_backlog->unindexed_count = 0;
server.repl_backlog->blocks_index = raxNew();
server.repl_backlog->histlen = 0;
/* We don't have any data inside our buffer, but virtually the first
* byte we have is the next byte that will be generated for the
* replication stream. */
server.repl_backlog->offset = server.master_repl_offset+1;
}
/* This function is called when the user modifies the replication backlog
* size at runtime. It is up to the function to resize the buffer and setup it
* so that it contains the same data as the previous one (possibly less data,
* but the most recent bytes, or the same data and more free space in case the
* buffer is enlarged). */
void resizeReplicationBacklog(void) {
if (server.repl_backlog_size < CONFIG_REPL_BACKLOG_MIN_SIZE)
server.repl_backlog_size = CONFIG_REPL_BACKLOG_MIN_SIZE;
if (server.repl_backlog)
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
}
/* Free the replication backlog. */
void freeReplicationBacklog(void) {
serverAssert(listLength(server.slaves) == 0);
if (server.repl_backlog == NULL) return;
/* Decrease the start buffer node reference count. */
/* The backlog holds the last reference to the head buffer block; drop it (refcount goes to 0). */
if (server.repl_backlog->ref_repl_buf_node) {
replBufBlock *o = listNodeValue(
server.repl_backlog->ref_repl_buf_node);
serverAssert(o->refcount == 1); /* Last reference. */
o->refcount--;
}
/* Replication buffer blocks are completely released when we free the
* backlog, since the backlog is released only when there are no replicas
* and the backlog keeps the last reference of all blocks. */
/* Asynchronously free the buffer block list and the block index. */
freeReplicationBacklogRefMemAsync(server.repl_buffer_blocks,
server.repl_backlog->blocks_index);
/* Reset the replication buffer. */
resetReplicationBuffer();
zfree(server.repl_backlog);
server.repl_backlog = NULL;
}
/* To make searching for an offset inside the replication buffer blocks fast
 * when replicas ask for a partial resynchronization, we create one index
 * entry every REPL_BACKLOG_INDEX_PER_BLOCKS blocks. */
/* Add an index entry for a newly appended replication backlog block node. */
void createReplicationBacklogIndex(listNode *ln) {
/* One more block without an index entry. */
server.repl_backlog->unindexed_count++;
/* The index is sparse: one entry is created every REPL_BACKLOG_INDEX_PER_BLOCKS (64) blocks. */
if (server.repl_backlog->unindexed_count >= REPL_BACKLOG_INDEX_PER_BLOCKS) {
replBufBlock *o = listNodeValue(ln);
/* Use the block's starting replication offset (big-endian encoded) as the index key. */
uint64_t encoded_offset = htonu64(o->repl_offset);
/* Insert the index entry. */
raxInsert(server.repl_backlog->blocks_index,
(unsigned char*)&encoded_offset, sizeof(uint64_t),
ln, NULL);
server.repl_backlog->unindexed_count = 0;
}
}
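/* For example, with REPL_BACKLOG_INDEX_PER_BLOCKS == 64 only every 64th appended
 * block gets an entry in blocks_index. The key is htonu64(repl_offset), i.e. the
 * block's starting replication offset in big-endian form, so the rax keys compare
 * in offset order; addReplyReplicationBacklog() later seeks this index and then
 * walks the block list linearly for at most a few dozen nodes. */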
/* Rebase replication buffer blocks' offset since the initial
* setting offset starts from 0 when master restart. */
void rebaseReplicationBuffer(long long base_repl_offset) {
raxFree(server.repl_backlog->blocks_index);
server.repl_backlog->blocks_index = raxNew();
server.repl_backlog->unindexed_count = 0;
listIter li;
listNode *ln;
listRewind(server.repl_buffer_blocks, &li);
while ((ln = listNext(&li))) {
replBufBlock *o = listNodeValue(ln);
o->repl_offset += base_repl_offset;
createReplicationBacklogIndex(ln);
}
}
/* Reset the replication buffer. */
void resetReplicationBuffer(void) {
server.repl_buffer_mem = 0;
server.repl_buffer_blocks = listCreate();
listSetFreeMethod(server.repl_buffer_blocks, (void (*)(void*))zfree);
}
int canFeedReplicaReplBuffer(client *replica) {
/* Don't feed replicas that only want the RDB. */
/* A replica flagged CLIENT_REPL_RDBONLY only wants the RDB snapshot and never gets the stream. */
if (replica->flags & CLIENT_REPL_RDBONLY) return 0;
/* Don't feed replicas that are still waiting for BGSAVE to start. */
/* A replica still waiting for the BGSAVE to start can't receive backlog data yet. */
if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) return 0;
return 1;
}
/* Similar to 'prepareClientToWrite'. Note that we must call this function
 * before feeding the replication stream into the global replication buffer,
 * since clientHasPendingReplies in prepareClientToWrite accesses the global
 * replication buffer to make its judgement. */
/* Put the replica clients into the pending-write queue. */
int prepareReplicasToWrite(void) {
listIter li;
listNode *ln;
int prepared = 0;
/* Iterate over all replica clients attached to this master. */
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
/* Skip replicas that can't receive replication buffer data. */
if (!canFeedReplicaReplBuffer(slave)) continue;
/* This effectively puts the client into the pending-write client queue. */
if (prepareClientToWrite(slave) == C_ERR) continue;
prepared++;
}
return prepared;
}
/* Wrapper for feedReplicationBuffer() that takes Redis string objects
* as input. */
/* Append an object to the replication buffer. */
void feedReplicationBufferWithObject(robj *o) {
char llstr[LONG_STR_SIZE];
void *p;
size_t len;
/* Get the object's actual value according to its encoding. */
if (o->encoding == OBJ_ENCODING_INT) {
len = ll2string(llstr,sizeof(llstr),(long)o->ptr);
p = llstr;
} else {
len = sdslen(o->ptr);
p = o->ptr;
}
/* Feed the value and its length into the replication buffer. */
feedReplicationBuffer(p,len);
}
/* Generally, we only have one replication buffer block to trim when the
 * replication backlog size exceeds our setting and no replica references it.
 * But if replica clients disconnect, we may need to free many referenced
 * replication buffer blocks at once. Freeing a lot of blocks in one shot would
 * freeze the server, so we trim the replication backlog incrementally. */
/* Remove some data buffer block nodes from the head of the replication backlog list. */
void incrementalTrimReplicationBacklog(size_t max_blocks) {
serverAssert(server.repl_backlog != NULL);
size_t trimmed_blocks = 0;
/* While the data held in the backlog exceeds the configured backlog limit: */
while (server.repl_backlog->histlen > server.repl_backlog_size &&
trimmed_blocks < max_blocks)
{
/* We never trim backlog to less than one block. */
if (listLength(server.repl_buffer_blocks) <= 1) break;
/* Replicas increment the refcount of the first replication buffer block
* they refer to, in that case, we don't trim the backlog even if
* backlog_histlen exceeds backlog_size. This implicitly makes backlog
* bigger than our setting, but makes the master accept partial resync as
* much as possible. So that backlog must be the last reference of
* replication buffer blocks. */
/* Get the first buffer block. */
listNode *first = listFirst(server.repl_buffer_blocks);
serverAssert(first == server.repl_backlog->ref_repl_buf_node);
replBufBlock *fo = listNodeValue(first);
/* A refcount other than 1 means a replica still references this block: stop, it can't be freed. */
if (fo->refcount != 1) break;
/* Don't trim if, once the first repl buffer block is released, the remaining
 * backlog would be smaller than the configured backlog size: in that case
 * there is no point in freeing this node. */
if (server.repl_backlog->histlen - (long long)fo->size <=
server.repl_backlog_size) break;
/* Decr refcount and release the first block later. */
/* Refcount was 1 (checked above), so after this decrement it drops to 0 and the block can be released. */
fo->refcount--;
trimmed_blocks++;
/* Adjust the amount of data recorded in the backlog. */
server.repl_backlog->histlen -= fo->size;
/* Go to use next replication buffer block node. */
/* Make the second node the new head node. */
listNode *next = listNextNode(first);
server.repl_backlog->ref_repl_buf_node = next;
serverAssert(server.repl_backlog->ref_repl_buf_node != NULL);
/* Incr reference count to keep the new head node. */
((replBufBlock *)listNodeValue(next))->refcount++;
/* Remove the node in recorded blocks. */
/* Remove this block's entry from the offset index. */
uint64_t encoded_offset = htonu64(fo->repl_offset);
raxRemove(server.repl_backlog->blocks_index,
(unsigned char*)&encoded_offset, sizeof(uint64_t), NULL);
/* Delete the first node from global replication buffer. */
serverAssert(fo->refcount == 0 && fo->used == fo->size);
/* Adjust the replication buffer memory accounting. */
server.repl_buffer_mem -= (fo->size +
sizeof(listNode) + sizeof(replBufBlock));
/* Delete the node. */
listDelNode(server.repl_buffer_blocks, first);
}
/* Set the offset of the first byte we have in the backlog. */
server.repl_backlog->offset = server.master_repl_offset -
server.repl_backlog->histlen + 1;
}
/* Free replication buffer blocks that are referenced by this client. */
void freeReplicaReferencedReplBuffer(client *replica) {
if (replica->ref_repl_buf_node != NULL) {
/* Decrease the start buffer node reference count. */
replBufBlock *o = listNodeValue(replica->ref_repl_buf_node);
serverAssert(o->refcount > 0);
o->refcount--;
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
}
replica->ref_repl_buf_node = NULL;
replica->ref_block_pos = 0;
}
/* Append bytes into the global replication buffer list. The replication backlog
 * and all replica clients use the replication buffers collectively, so this
 * function replaces 'addReply*' and 'feedReplicationBacklog' for replicas and the
 * replication backlog. First we append the data to the global replication buffer
 * block list, and then we update the node and block position referenced by each
 * replica and by the replication backlog. */
/* Put data into the replication backlog blocks. */
void feedReplicationBuffer(char *s, size_t len) {
static long long repl_block_id = 0;
/* No replication backlog, nothing to do. */
if (server.repl_backlog == NULL) return;
/* Advance the master replication offset. */
server.master_repl_offset += len;
/* Grow the amount of data stored in the backlog. */
server.repl_backlog->histlen += len;
size_t start_pos = 0; /* The position of referenced block to start sending. */
listNode *start_node = NULL; /* Replica/backlog starts referenced node. */
int add_new_block = 0; /* Create new block if current block is total used. */
listNode *ln = listLast(server.repl_buffer_blocks);
replBufBlock *tail = ln ? listNodeValue(ln) : NULL;
/* Append to tail string when possible. */
/* The tail block still has free space. */
if (tail && tail->size > tail->used) {
/* The start node is the current tail node. */
start_node = listLast(server.repl_buffer_blocks);
/* The start position is the first free byte of the tail block. */
start_pos = tail->used;
/* Copy the part we can fit into the tail, and leave the rest for a
* new node */
/* Copy as much of the data as fits into the tail block; whatever doesn't fit is handled below. */
size_t avail = tail->size - tail->used;
size_t copy = (avail >= len) ? len : avail;
memcpy(tail->buf + tail->used, s, copy);
tail->used += copy;
s += copy;
len -= copy;
}
/* len > 0 means the tail block couldn't hold everything. */
if (len) {
/* Create a new node, make sure it is allocated to at
* least PROTO_REPLY_CHUNK_BYTES */
size_t usable_size;
/* Allocate a new block for the remaining len bytes; at least PROTO_REPLY_CHUNK_BYTES are allocated. */
size_t size = (len < PROTO_REPLY_CHUNK_BYTES) ? PROTO_REPLY_CHUNK_BYTES : len;
tail = zmalloc_usable(size + sizeof(replBufBlock), &usable_size);
/* Take over the allocation's internal fragmentation */
/* Record the space available for data in the new block. */
tail->size = usable_size - sizeof(replBufBlock);
/* Record how much of the new block this write uses. */
tail->used = len;
tail->refcount = 0;
tail->repl_offset = server.master_repl_offset - tail->used + 1;
tail->id = repl_block_id++;
/* Copy the remaining data into the new block. */
memcpy(tail->buf, s, len);
/* Append it to the tail of the block list. */
listAddNodeTail(server.repl_buffer_blocks, tail);
/* We also count the list node memory into replication buffer memory. */
/* The list node overhead is accounted as replication buffer memory too. */
server.repl_buffer_mem += (usable_size + sizeof(listNode));
add_new_block = 1;
/* If the old tail was already full, the start node is this newly created block, starting at position 0. */
if (start_node == NULL) {
start_node = listLast(server.repl_buffer_blocks);
start_pos = 0;
}
}
/* For output buffer of replicas. */
listIter li;
/* Iterate over all replicas. */
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
if (!canFeedReplicaReplBuffer(slave)) continue;
/* This replica can receive replication buffer data. */
/* Update shared replication buffer start position. */
/* Update the replica's replication start position. */
if (slave->ref_repl_buf_node == NULL) {
slave->ref_repl_buf_node = start_node;
slave->ref_block_pos = start_pos;
/* Only increase the start block reference count. */
((replBufBlock *)listNodeValue(start_node))->refcount++;
}
/* Check output buffer limit only when add new block. */
/* When a new block was added, check the replica's output buffer limit and close the client if it was reached. */
if (add_new_block) closeClientOnOutputBufferLimitReached(slave, 1);
}
/* For replication backlog */
/* Set the block the replication backlog currently references. */
if (server.repl_backlog->ref_repl_buf_node == NULL) {
server.repl_backlog->ref_repl_buf_node = start_node;
/* Only increase the start block reference count. */
((replBufBlock *)listNodeValue(start_node))->refcount++;
/* Replication buffer must be empty before adding replication stream
* into replication backlog. */
serverAssert(add_new_block == 1 && start_pos == 0);
}
/* If a new block was added, index it (a sparse index entry is created every 64 blocks). */
if (add_new_block) {
createReplicationBacklogIndex(listLast(server.repl_buffer_blocks));
}
/* Try to trim replication backlog since replication backlog may exceed
* our setting when we add replication stream. Note that it is important to
* try to trim at least one node since in the common case this is where one
* new backlog node is added and one should be removed. See also comments
* in freeMemoryGetNotCountedMemory for details. */
/* Incrementally release replication backlog blocks. */
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
}
/* Propagate write commands to replication stream.
*
* This function is used if the instance is a master: we use the commands
* received by our clients in order to create the replication stream.
* Instead if the instance is a replica and has sub-replicas attached, we use
* replicationFeedStreamFromMasterStream() */
/* replicationFeedSlaves() is called whenever a write command is propagated to the
 * replication stream; replicationCron() (run once per second, or more often while a
 * failover is in progress) also uses it to send the periodic PINGs to replicas.
 * dictid is the id of the database the master currently has selected. */
void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
int j, len;
char llstr[LONG_STR_SIZE];
/* In case we propagate a command that doesn't touch keys (PING, REPLCONF) we
* pass dbid=server.slaveseldb which may be -1. */
serverAssert(dictid == -1 || (dictid >= 0 && dictid < server.dbnum));
/* If the instance is not a top level master, return ASAP: we'll just proxy
* the stream of data we receive from our master instead, in order to
* propagate *identical* replication stream. In this way this slave can
* advertise the same replication ID as the master (since it shares the
* master replication history and has the same backlog and offsets). */
/* If this instance is not a top-level master, return right away. */
if (server.masterhost != NULL) return;
/* If there aren't slaves, and there is no backlog buffer to populate,
* we can return ASAP. */
if (server.repl_backlog == NULL && listLength(slaves) == 0) return;
/* We can't have slaves attached and no backlog. */
serverAssert(!(listLength(slaves) != 0 && server.repl_backlog == NULL));
/* Must install write handler for all replicas first before feeding
* replication stream. */
/* Add the replica clients eligible to receive backlog data to the pending-write client queue. */
prepareReplicasToWrite();
/* Send SELECT command to every slave if needed. */
/* If the database the replicas have selected differs from the master's current one, a SELECT command must be sent. */
if (server.slaveseldb != dictid) {
robj *selectcmd;
/* For a few DBs we have pre-computed SELECT command. */
/* Redis pre-builds shared SELECT command strings for the first PROTO_SHARED_SELECT_CMDS
 * databases; when dictid is below that we can reuse the shared string instead of
 * assembling the command. */
if (dictid >= 0 && dictid < PROTO_SHARED_SELECT_CMDS) {
selectcmd = shared.select[dictid];
} else {
int dictid_len;
/* Build the SELECT command. This also shows the wire format (RESP) used for the
 * replication stream:
 *   '*'    : a multi bulk (array) follows
 *   2      : the command has 2 arguments (the command name counts as one)
 *   $6     : the next argument is 6 bytes long
 *   SELECT : the argument itself
 * e.g. selecting DB 10 is encoded as "*2\r\n$6\r\nSELECT\r\n$2\r\n10\r\n".
 */
dictid_len = ll2string(llstr,sizeof(llstr),dictid);
selectcmd = createObject(OBJ_STRING,
sdscatprintf(sdsempty(),
"*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n",
dictid_len, llstr));
}
/* Append the SELECT command string to the replication backlog. */
feedReplicationBufferWithObject(selectcmd);
if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS)
decrRefCount(selectcmd);
server.slaveseldb = dictid;
}
/* Write the command to the replication buffer if any. */
char aux[LONG_STR_SIZE+3];
/* Add the multi bulk reply length. */
/* The first character is '*'. */
aux[0] = '*';
/* Number of arguments. */
len = ll2string(aux+1,sizeof(aux)-1,argc);
aux[len+1] = '\r';
aux[len+2] = '\n';
/* Feed the header built in aux into the replication backlog blocks. */
feedReplicationBuffer(aux,len+3);
/* Iterate over all the arguments. */
for (j = 0; j < argc; j++) {
long objlen = stringObjectLen(argv[j]);
/* We need to feed the buffer with the object as a bulk reply
* not just as a plain string, so create the $..CRLF payload len
* and add the final CRLF */
/* Build the argument length header. */
aux[0] = '$';
len = ll2string(aux+1,sizeof(aux)-1,objlen);
aux[len+1] = '\r';
aux[len+2] = '\n';
/* Feed the formatted length string into the replication backlog. */
feedReplicationBuffer(aux,len+3);
/* Feed the argument itself into the replication backlog blocks. */
feedReplicationBufferWithObject(argv[j]);
/* Feed the trailing \r\n into the replication backlog blocks. */
feedReplicationBuffer(aux+len+1,2);
}
}
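/* As an illustration (assuming the last database selected on the replication stream
 * is not DB 5 and a client in DB 5 runs "SET key val"), the bytes appended to the
 * replication stream by this function would be:
 *   *2\r\n$6\r\nSELECT\r\n$1\r\n5\r\n
 *   *3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\nval\r\n */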
/* This is a debugging function that gets called when we detect something
* wrong with the replication protocol: the goal is to peek into the
* replication backlog and show a few final bytes to make simpler to
* guess what kind of bug it could be. */
void showLatestBacklog(void) {
if (server.repl_backlog == NULL) return;
if (listLength(server.repl_buffer_blocks) == 0) return;
size_t dumplen = 256;
if (server.repl_backlog->histlen < (long long)dumplen)
dumplen = server.repl_backlog->histlen;
sds dump = sdsempty();
listNode *node = listLast(server.repl_buffer_blocks);
while(dumplen) {
if (node == NULL) break;
replBufBlock *o = listNodeValue(node);
size_t thislen = o->used >= dumplen ? dumplen : o->used;
sds head = sdscatrepr(sdsempty(), o->buf+o->used-thislen, thislen);
sds tmp = sdscatsds(head, dump);
sdsfree(dump);
dump = tmp;
dumplen -= thislen;
node = listPrevNode(node);
}
/* Finally log such bytes: this is vital debugging info to
* understand what happened. */
serverLog(LL_WARNING,"Latest backlog is: '%s'", dump);
sdsfree(dump);
}
/* This function is used in order to proxy what we receive from our master
* to our sub-slaves. */
#include <ctype.h>
void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) {
/* Debugging: this is handy to see the stream sent from master
* to slaves. Disabled with if(0). */
if (0) {
printf("%zu:",buflen);
for (size_t j = 0; j < buflen; j++) {
printf("%c", isprint(buf[j]) ? buf[j] : '.');
}
printf("\n");
}
/* There must be replication backlog if having attached slaves. */
if (listLength(server.slaves)) serverAssert(server.repl_backlog != NULL);
if (server.repl_backlog) {
/* Must install write handler for all replicas first before feeding
* replication stream. */
prepareReplicasToWrite();
feedReplicationBuffer(buf,buflen);
}
}
void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc) {
/* Fast path to return if the monitors list is empty or the server is in loading. */
if (monitors == NULL || listLength(monitors) == 0 || server.loading) return;
listNode *ln;
listIter li;
int j;
sds cmdrepr = sdsnew("+");
robj *cmdobj;
struct timeval tv;
gettimeofday(&tv,NULL);
cmdrepr = sdscatprintf(cmdrepr,"%ld.%06ld ",(long)tv.tv_sec,(long)tv.tv_usec);
if (c->flags & CLIENT_SCRIPT) {
cmdrepr = sdscatprintf(cmdrepr,"[%d lua] ",dictid);
} else if (c->flags & CLIENT_UNIX_SOCKET) {
cmdrepr = sdscatprintf(cmdrepr,"[%d unix:%s] ",dictid,server.unixsocket);
} else {
cmdrepr = sdscatprintf(cmdrepr,"[%d %s] ",dictid,getClientPeerId(c));
}
for (j = 0; j < argc; j++) {
if (argv[j]->encoding == OBJ_ENCODING_INT) {
cmdrepr = sdscatprintf(cmdrepr, "\"%ld\"", (long)argv[j]->ptr);
} else {
cmdrepr = sdscatrepr(cmdrepr,(char*)argv[j]->ptr,
sdslen(argv[j]->ptr));
}
if (j != argc-1)
cmdrepr = sdscatlen(cmdrepr," ",1);
}
cmdrepr = sdscatlen(cmdrepr,"\r\n",2);
cmdobj = createObject(OBJ_STRING,cmdrepr);
listRewind(monitors,&li);
while((ln = listNext(&li))) {
client *monitor = ln->value;
addReply(monitor,cmdobj);
updateClientMemUsage(c);
}
decrRefCount(cmdobj);
}
/* Feed the slave 'c' with the replication backlog starting from the
* specified 'offset' up to the end of the backlog. */
/* Send the backlog data starting at 'offset' to the replica.
 * Note: this differs a lot from Redis 6, where the replication backlog was a
 * circular buffer and no rax structure was used for fast offset lookups. */
long long addReplyReplicationBacklog(client *c, long long offset) {
long long skip;
serverLog(LL_DEBUG, "[PSYNC] Replica request offset: %lld", offset);
/* If the backlog currently holds no data, return right away. */
if (server.repl_backlog->histlen == 0) {
serverLog(LL_DEBUG, "[PSYNC] Backlog history len is zero");
return 0;
}
serverLog(LL_DEBUG, "[PSYNC] Backlog size: %lld",
server.repl_backlog_size);
serverLog(LL_DEBUG, "[PSYNC] First byte: %lld",
server.repl_backlog->offset);
serverLog(LL_DEBUG, "[PSYNC] History len: %lld",
server.repl_backlog->histlen);
/* Compute the amount of bytes we need to discard. */
/* Requested start offset minus the offset of the backlog's first byte = the amount of backlog data to skip. */
skip = offset - server.repl_backlog->offset;
serverLog(LL_DEBUG, "[PSYNC] Skipping: %lld", skip);
/* Iterate recorded blocks, quickly search the approximate node. */
/* 'node' will end up pointing at the first backlog block that must be sent to the replica; the if below and the while loop after it do the locating. */
listNode *node = NULL;
/* Is the backlog's sparse index non-empty? */
if (raxSize(server.repl_backlog->blocks_index) > 0) {
/* Encode the long long offset as a big-endian uint64 key. */
uint64_t encoded_offset = htonu64(offset);
raxIterator ri;
/* Create a raxIterator over the rax index. */
raxStart(&ri, server.repl_backlog->blocks_index);
/* Seek the iterator to the first entry strictly greater than offset. */
raxSeek(&ri, ">", (unsigned char*)&encoded_offset, sizeof(uint64_t));
/* EOF here means no indexed entry is greater than the requested offset (the largest indexed offset is <= offset). */
if (raxEOF(&ri)) {
/* Not found, so search from the last recorded node. */
/* Seek the iterator to the greatest key in the index. */
raxSeek(&ri, "$", NULL, 0);
/* Move onto the last indexed element (the first step after a seek returns the seeked element itself). */
raxPrev(&ri);
/* Start the exact search below from that last indexed block node. */
node = (listNode *)ri.data;
} else {
/* Not at EOF: the iterator stopped on the first indexed entry greater than
 * offset, so that sought entry has to be skipped first. */
raxPrev(&ri); /* Skip the sought node. */
/* We should search from the prev node since the offset of current
* sought node exceeds searching offset. */
if (raxPrev(&ri))
node = (listNode *)ri.data;
else
node = server.repl_backlog->ref_repl_buf_node;
}
raxStop(&ri);
} else {
/* No recorded blocks, just from the start node to search. */
/* No index entries at all: start searching from the backlog's first block node. */
node = server.repl_backlog->ref_repl_buf_node;
}
/* Search the exact node. */
/* Starting from the node located above, keep walking forward through the list. */
while (node != NULL) {
replBufBlock *o = listNodeValue(node);
/* If the block's starting replication offset plus its used size reaches the requested offset, this is the block we're after. */
if (o->repl_offset + (long long)o->used >= offset) break;
/* Not found yet, keep looking. */
node = listNextNode(node);
}
serverAssert(node != NULL);
/* Install a writer handler first.*/
/* Pre-write setup for the replica client. */
prepareClientToWrite(c);
/* Setting output buffer of the replica. */
/* Get the backlog block held by the node we found. */
replBufBlock *o = listNodeValue(node);
/* Increment the block's reference count. */
o->refcount++;
/* Record the node the replica starts replicating from. */
c->ref_repl_buf_node = node;
/* Record where inside that block the replica starts replicating:
 * requested start offset - block's starting replication offset = position within the block. */
c->ref_block_pos = offset - o->repl_offset;
/* Return the amount of backlog data that will be sent: histlen is the total data
 * currently in the backlog, and skip is the data before 'offset' that the replica
 * does not need, so histlen - skip is what remains to replicate. */
return server.repl_backlog->histlen - skip;
}
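/* Worked example (illustrative numbers): if the first byte in the backlog has
 * offset 1000 and histlen is 500 (so master_repl_offset is 1499), a replica asking
 * for offset 1200 gets skip = 200, its ref_repl_buf_node/ref_block_pos set to the
 * block containing offset 1200, and the function returns 300, the number of
 * backlog bytes (offsets 1200..1499) that will be streamed to it. */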
/* Return the offset to provide as reply to the PSYNC command received
* from the slave. The returned value is only valid immediately after
* the BGSAVE process started and before executing any other command
* from clients. */
long long getPsyncInitialOffset(void) {
return server.master_repl_offset;
}
/* Send a FULLRESYNC reply in the specific case of a full resynchronization,
* as a side effect setup the slave for a full sync in different ways:
*
* 1) Remember, into the slave client structure, the replication offset
* we sent here, so that if new slaves will later attach to the same
* background RDB saving process (by duplicating this client output
* buffer), we can get the right offset from this slave.
* 2) Set the replication state of the slave to WAIT_BGSAVE_END so that
* we start accumulating differences from this point.
* 3) Force the replication stream to re-emit a SELECT statement so
* the new slave incremental differences will start selecting the
* right database number.
*
* Normally this function should be called immediately after a successful
* BGSAVE for replication was started, or when there is one already in
* progress that we attached our slave to. */
/* In short: the RDB snapshot sync starts first, and the replication offset at that
 * moment is recorded in the slave client structure (point 1 above), so that new
 * replicas attaching to the same background save can reuse it. The WAIT_BGSAVE_END
 * state (point 2) marks the point from which newly arriving write commands are
 * accumulated for the replica instead of going into the RDB, to be sent once the
 * replica has finished receiving the RDB. Forcing slaveseldb to -1 (point 3) makes
 * the stream re-emit SELECT so the new replica applies commands to the right database. */
int replicationSetupSlaveForFullResync(client *slave, long long offset) {
char buf[128];
int buflen;
/* Set the replica's initial PSYNC offset to the provided offset (the replication offset at the moment the RDB sync started). */
slave->psync_initial_offset = offset;
/* Replication state transition. */
slave->replstate = SLAVE_STATE_WAIT_BGSAVE_END;
/* We are going to accumulate the incremental changes for this
* slave as well. Set slaveseldb to -1 in order to force to re-emit
* a SELECT statement in the replication stream. */
/* Setting slaveseldb to -1 forces a SELECT to be re-emitted later so the replica selects the correct database. */
server.slaveseldb = -1;
/* Don't send this reply to slaves that approached us with
* the old SYNC command. */
/* CLIENT_PRE_PSYNC replicas used the old SYNC command and don't expect the +FULLRESYNC reply. */
if (!(slave->flags & CLIENT_PRE_PSYNC)) {
buflen = snprintf(buf,sizeof(buf),"+FULLRESYNC %s %lld\r\n",
server.replid,offset);
if (connWrite(slave->conn,buf,buflen) != buflen) {
freeClientAsync(slave);
return C_ERR;
}
}
return C_OK;
}
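/* The reply sent above looks like (illustrative values):
 *   +FULLRESYNC 3fd5a8c9e4b1... (40 hex chars) 3479\r\n
 * The replica stores this replid/offset pair and then waits for the RDB payload. */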
/* This function handles the PSYNC command from the point of view of a
* master receiving a request for partial resynchronization.
*
* On success return C_OK, otherwise C_ERR is returned and we proceed
* with the usual full resync. */
/* Master side of a partial resynchronization attempt. */
int masterTryPartialResynchronization(client *c, long long psync_offset) {
long long psync_len;
char *master_replid = c->argv[1]->ptr;
char buf[128];
int buflen;
/* Is the replication ID of this master the same advertised by the wannabe
* slave via PSYNC? If the replication ID changed this master has a
* different replication history, and there is no way to continue.
*
* Note that there are two potentially valid replication IDs: the ID1
* and the ID2. The ID2 however is only valid up to a specific offset. */
/* Check whether the replication ID held by this master matches the one the replica
 * sent via PSYNC. If it doesn't (and the second replid/offset doesn't cover the
 * request either), this master's replication history differs from what the replica
 * expects, so the partial resync is refused and a full resynchronization is
 * performed instead. */
if (strcasecmp(master_replid, server.replid) &&
(strcasecmp(master_replid, server.replid2) ||
psync_offset > server.second_replid_offset))
{
/* Replid "?" is used by slaves that want to force a full resync. */
if (master_replid[0] != '?') {
if (strcasecmp(master_replid, server.replid) &&
strcasecmp(master_replid, server.replid2))
{
serverLog(LL_NOTICE,"Partial resynchronization not accepted: "
"Replication ID mismatch (Replica asked for '%s', my "
"replication IDs are '%s' and '%s')",
master_replid, server.replid, server.replid2);
} else {
serverLog(LL_NOTICE,"Partial resynchronization not accepted: "
"Requested offset for second ID was %lld, but I can reply "
"up to %lld", psync_offset, server.second_replid_offset);
}
} else {
serverLog(LL_NOTICE,"Full resync requested by replica %s",
replicationGetSlaveName(c));
}
/* Entering this branch means a full resynchronization is needed. */
goto need_full_resync;
}
/* We still have the data our slave is asking for? */
/* A partial resynchronization is impossible in three cases:
 * 1. there is no replication backlog;
 * 2. the requested offset is smaller than the backlog's starting offset;
 * 3. the requested offset is beyond the data the backlog currently holds. */
if (!server.repl_backlog ||
psync_offset < server.repl_backlog->offset ||
psync_offset > (server.repl_backlog->offset + server.repl_backlog->histlen))
{
serverLog(LL_NOTICE,
"Unable to partial resync with replica %s for lack of backlog (Replica request was: %lld).", replicationGetSlaveName(c), psync_offset);
if (psync_offset > server.master_repl_offset) {
serverLog(LL_WARNING,
"Warning: replica %s tried to PSYNC with an offset that is greater than the master replication offset.", replicationGetSlaveName(c));
}
/* A full resynchronization is needed. */
goto need_full_resync;
}
/* If we reached this point, we are able to perform a partial resync:
* 1) Set client state to make it a slave.
* 2) Inform the client we can continue with +CONTINUE
* 3) Send the backlog data (from the offset to the end) to the slave. */
/* Reaching this point means a partial resynchronization can be performed. */
/* Mark the client as a replica. */
c->flags |= CLIENT_SLAVE;
/* Set the replication state to online. */
c->replstate = SLAVE_STATE_ONLINE;
/* Record the replica ack time (used later for timeout detection). */
c->repl_ack_time = server.unixtime;
c->repl_start_cmd_stream_on_ack = 0;
/* Append the replica to the tail of server.slaves. */
listAddNodeTail(server.slaves,c);
/* We can't use the connection buffers since they are used to accumulate
* new commands at this stage. But we are sure the socket send buffer is
* empty so this write will never fail actually. */
/* Check the replication capabilities the replica advertised; PSYNC2-capable replicas also get the replication ID in the +CONTINUE reply. */
if (c->slave_capa & SLAVE_CAPA_PSYNC2) {
buflen = snprintf(buf,sizeof(buf),"+CONTINUE %s\r\n", server.replid);
} else {
buflen = snprintf(buf,sizeof(buf),"+CONTINUE\r\n");
}
if (connWrite(c->conn,buf,buflen) != buflen) {
freeClientAsync(c);
return C_OK;
}
/* Key step: send the backlog data to the replica. */
psync_len = addReplyReplicationBacklog(c,psync_offset);
serverLog(LL_NOTICE,
"Partial resynchronization request from %s accepted. Sending %lld bytes of backlog starting from offset %lld.",
replicationGetSlaveName(c),
psync_len, psync_offset);
/* Note that we don't need to set the selected DB at server.slaveseldb
* to -1 to force the master to emit SELECT, since the slave already
* has this state from the previous connection with the master. */
/* (As noted above, only a full resync forces slaveseldb to -1 so the replica
 * re-selects the right database; a partial resync reuses the DB selection kept
 * from the replica's previous connection with the master.) */
refreshGoodSlavesCount();
/* Fire the replica change modules event. */
/* Fire the replica change module event. */
moduleFireServerEvent(REDISMODULE_EVENT_REPLICA_CHANGE,
REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE,
NULL);
return C_OK; /* The caller can return, no full resync needed. */
/* One of the checks above ruled out a partial resync: return C_ERR so that a full resynchronization is performed instead. */
need_full_resync:
/* We need a full resync for some reason... Note that we can't
* reply to PSYNC right now if a full SYNC is needed. The reply
* must include the master offset at the time the RDB file we transfer
* is generated, so we need to delay the reply to that moment. */
return C_ERR;
}
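/* Example of an exchange this function accepts (illustrative values):
 *   replica: PSYNC 3fd5a8c9e4b1... 1001
 *   master : +CONTINUE 3fd5a8c9e4b1...   (replicas with the PSYNC2 capability also get the replid)
 * followed by the backlog bytes starting at offset 1001, sent via addReplyReplicationBacklog(). */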
/* Start a BGSAVE for replication goals, which is, selecting the disk or
* socket target depending on the configuration, and making sure that
* the script cache is flushed before to start.
*
* The mincapa argument is the bitwise AND among all the slaves capabilities
* of the slaves waiting for this BGSAVE, so represents the slave capabilities
* all the slaves support. Can be tested via SLAVE_CAPA_* macros.
*
* Side effects, other than starting a BGSAVE:
*
* 1) Handle the slaves in WAIT_START state, by preparing them for a full
* sync if the BGSAVE was successfully started, or sending them an error
* and dropping them from the list of slaves.
*
* 2) Flush the Lua scripting script cache if the BGSAVE was actually
* started.
*
* Returns C_OK on success or C_ERR otherwise. */
int startBgsaveForReplication(int mincapa, int req) {
int retval;
int socket_target = 0;
listIter li;
listNode *ln;
/* We use a socket target if slave can handle the EOF marker and we're configured to do diskless syncs.
* Note that in case we're creating a "filtered" RDB (functions-only, for example) we also force socket replication
* to avoid overwriting the snapshot RDB file with filtered data. */
/* Decide whether the RDB is sent over the socket directly or saved to a file first:
 * the socket (diskless) target is used when diskless sync is enabled on the master,
 * or when the replica requested a filtered RDB (e.g. one excluding data or functions),
 * provided the replica can handle the EOF marker. */
socket_target = (server.repl_diskless_sync || req & SLAVE_REQ_RDB_MASK) && (mincapa & SLAVE_CAPA_EOF);
/* `SYNC` should have failed with error if we don't support socket and require a filter, assert this here */
serverAssert(socket_target || !(req & SLAVE_REQ_RDB_MASK));