@@ -12,9 +12,14 @@ import (
12
12
logging "github.com/ipfs/go-log"
13
13
loggables "github.com/libp2p/go-libp2p-loggables"
14
14
peer "github.com/libp2p/go-libp2p-peer"
15
+
16
+ bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter"
15
17
)
16
18
17
// Live-want window sizes. While the session has no optimized peers it
// broadcasts wants, so the window is kept small; once targeted peers
// are known a much larger number of wants may be kept in flight.
const (
	broadcastLiveWantsLimit = 4
	targetedLiveWantsLimit  = 32
)
18
23
19
24
// WantManager is an interface that can be used to request blocks
20
25
// from given peers.
@@ -32,14 +37,23 @@ type PeerManager interface {
32
37
RecordPeerResponse (peer.ID , cid.Cid )
33
38
}
34
39
40
+ // RequestSplitter provides an interface for splitting
41
+ // a request for Cids up among peers.
42
+ type RequestSplitter interface {
43
+ SplitRequest ([]peer.ID , []cid.Cid ) []* bssrs.PartialRequest
44
+ RecordDuplicateBlock ()
45
+ RecordUniqueBlock ()
46
+ }
47
+
35
48
type interestReq struct {
36
49
c cid.Cid
37
50
resp chan bool
38
51
}
39
52
40
53
type blkRecv struct {
41
- from peer.ID
42
- blk blocks.Block
54
+ from peer.ID
55
+ blk blocks.Block
56
+ counterMessage bool
43
57
}
44
58
45
59
// Session holds state for an individual bitswap transfer operation.
@@ -50,6 +64,7 @@ type Session struct {
50
64
ctx context.Context
51
65
wm WantManager
52
66
pm PeerManager
67
+ srs RequestSplitter
53
68
54
69
// channels
55
70
incoming chan blkRecv
@@ -62,12 +77,12 @@ type Session struct {
62
77
// do not touch outside run loop
63
78
tofetch * cidQueue
64
79
interest * lru.Cache
80
+ pastWants * cidQueue
65
81
liveWants map [cid.Cid ]time.Time
66
82
tick * time.Timer
67
83
baseTickDelay time.Duration
68
84
latTotal time.Duration
69
85
fetchcnt int
70
-
71
86
// identifiers
72
87
notif notifications.PubSub
73
88
uuid logging.Loggable
@@ -76,18 +91,20 @@ type Session struct {
76
91
77
92
// New creates a new bitswap session whose lifetime is bounded by the
78
93
// given context.
79
- func New (ctx context.Context , id uint64 , wm WantManager , pm PeerManager ) * Session {
94
+ func New (ctx context.Context , id uint64 , wm WantManager , pm PeerManager , srs RequestSplitter ) * Session {
80
95
s := & Session {
81
96
liveWants : make (map [cid.Cid ]time.Time ),
82
97
newReqs : make (chan []cid.Cid ),
83
98
cancelKeys : make (chan []cid.Cid ),
84
99
tofetch : newCidQueue (),
100
+ pastWants : newCidQueue (),
85
101
interestReqs : make (chan interestReq ),
86
102
latencyReqs : make (chan chan time.Duration ),
87
103
tickDelayReqs : make (chan time.Duration ),
88
104
ctx : ctx ,
89
105
wm : wm ,
90
106
pm : pm ,
107
+ srs : srs ,
91
108
incoming : make (chan blkRecv ),
92
109
notif : notifications .New (),
93
110
uuid : loggables .Uuid ("GetBlockRequest" ),
@@ -106,14 +123,23 @@ func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager) *Sessio
106
123
// ReceiveBlockFrom receives an incoming block from the given peer.
107
124
func (s * Session ) ReceiveBlockFrom (from peer.ID , blk blocks.Block ) {
108
125
select {
109
- case s .incoming <- blkRecv {from : from , blk : blk }:
126
+ case s .incoming <- blkRecv {from : from , blk : blk , counterMessage : false }:
110
127
case <- s .ctx .Done ():
111
128
}
112
129
ks := []cid.Cid {blk .Cid ()}
113
130
s .wm .CancelWants (s .ctx , ks , nil , s .id )
114
131
115
132
}
116
133
134
+ // UpdateReceiveCounters updates receive counters for a block,
135
+ // which may be a duplicate and adjusts the split factor based on that.
136
+ func (s * Session ) UpdateReceiveCounters (blk blocks.Block ) {
137
+ select {
138
+ case s .incoming <- blkRecv {from : "" , blk : blk , counterMessage : true }:
139
+ case <- s .ctx .Done ():
140
+ }
141
+ }
142
+
117
143
// InterestedIn returns true if this session is interested in the given Cid.
118
144
func (s * Session ) InterestedIn (c cid.Cid ) bool {
119
145
if s .interest .Contains (c ) {
@@ -205,7 +231,11 @@ func (s *Session) run(ctx context.Context) {
205
231
for {
206
232
select {
207
233
case blk := <- s .incoming :
208
- s .handleIncomingBlock (ctx , blk )
234
+ if blk .counterMessage {
235
+ s .updateReceiveCounters (ctx , blk )
236
+ } else {
237
+ s .handleIncomingBlock (ctx , blk )
238
+ }
209
239
case keys := <- s .newReqs :
210
240
s .handleNewRequest (ctx , keys )
211
241
case keys := <- s .cancelKeys :
@@ -241,8 +271,7 @@ func (s *Session) handleNewRequest(ctx context.Context, keys []cid.Cid) {
241
271
for _ , k := range keys {
242
272
s .interest .Add (k , nil )
243
273
}
244
- if len (s .liveWants ) < activeWantsLimit {
245
- toadd := activeWantsLimit - len (s .liveWants )
274
+ if toadd := s .wantBudget (); toadd > 0 {
246
275
if toadd > len (keys ) {
247
276
toadd = len (keys )
248
277
}
@@ -264,6 +293,7 @@ func (s *Session) handleCancel(keys []cid.Cid) {
264
293
}
265
294
266
295
func (s * Session ) handleTick (ctx context.Context ) {
296
+
267
297
live := make ([]cid.Cid , 0 , len (s .liveWants ))
268
298
now := time .Now ()
269
299
for c := range s .liveWants {
@@ -303,6 +333,7 @@ func (s *Session) cidIsWanted(c cid.Cid) bool {
303
333
func (s * Session ) receiveBlock (ctx context.Context , blk blocks.Block ) {
304
334
c := blk .Cid ()
305
335
if s .cidIsWanted (c ) {
336
+ s .srs .RecordUniqueBlock ()
306
337
tval , ok := s .liveWants [c ]
307
338
if ok {
308
339
s .latTotal += time .Since (tval )
@@ -313,9 +344,26 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) {
313
344
s .fetchcnt ++
314
345
s .notif .Publish (blk )
315
346
316
- if next := s .tofetch .Pop (); next .Defined () {
317
- s .wantBlocks (ctx , []cid.Cid {next })
347
+ toAdd := s .wantBudget ()
348
+ if toAdd > s .tofetch .Len () {
349
+ toAdd = s .tofetch .Len ()
350
+ }
351
+ if toAdd > 0 {
352
+ var keys []cid.Cid
353
+ for i := 0 ; i < toAdd ; i ++ {
354
+ keys = append (keys , s .tofetch .Pop ())
355
+ }
356
+ s .wantBlocks (ctx , keys )
318
357
}
358
+
359
+ s .pastWants .Push (c )
360
+ }
361
+ }
362
+
363
+ func (s * Session ) updateReceiveCounters (ctx context.Context , blk blkRecv ) {
364
+ ks := blk .blk .Cid ()
365
+ if s .pastWants .Has (ks ) {
366
+ s .srs .RecordDuplicateBlock ()
319
367
}
320
368
}
321
369
@@ -325,9 +373,16 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) {
325
373
s .liveWants [c ] = now
326
374
}
327
375
peers := s .pm .GetOptimizedPeers ()
328
- // right now we're requesting each block from every peer, but soon, maybe not
329
- s .pm .RecordPeerRequests (peers , ks )
330
- s .wm .WantBlocks (ctx , ks , peers , s .id )
376
+ if len (peers ) > 0 {
377
+ splitRequests := s .srs .SplitRequest (peers , ks )
378
+ for _ , splitRequest := range splitRequests {
379
+ s .pm .RecordPeerRequests (splitRequest .Peers , splitRequest .Keys )
380
+ s .wm .WantBlocks (ctx , splitRequest .Keys , splitRequest .Peers , s .id )
381
+ }
382
+ } else {
383
+ s .pm .RecordPeerRequests (nil , ks )
384
+ s .wm .WantBlocks (ctx , ks , nil , s .id )
385
+ }
331
386
}
332
387
333
388
func (s * Session ) averageLatency () time.Duration {
@@ -342,3 +397,17 @@ func (s *Session) resetTick() {
342
397
s .tick .Reset (s .baseTickDelay + (3 * avLat ))
343
398
}
344
399
}
400
+
401
+ func (s * Session ) wantBudget () int {
402
+ live := len (s .liveWants )
403
+ var budget int
404
+ if len (s .pm .GetOptimizedPeers ()) > 0 {
405
+ budget = targetedLiveWantsLimit - live
406
+ } else {
407
+ budget = broadcastLiveWantsLimit - live
408
+ }
409
+ if budget < 0 {
410
+ budget = 0
411
+ }
412
+ return budget
413
+ }
0 commit comments