@@ -8,6 +8,9 @@

	bsmsg "github.com/ipfs/go-bitswap/message"
	wl "github.com/ipfs/go-bitswap/wantlist"
+	cid "github.com/ipfs/go-cid"
+	"github.com/ipfs/go-peertaskqueue"
+	"github.com/ipfs/go-peertaskqueue/peertask"

	blocks "github.com/ipfs/go-block-format"
	bstore "github.com/ipfs/go-ipfs-blockstore"
@@ -73,7 +76,7 @@ type Engine struct {
	// peerRequestQueue is a priority queue of requests received from peers.
	// Requests are popped from the queue, packaged up, and placed in the
	// outbox.
-	peerRequestQueue *prq
+	peerRequestQueue *peertaskqueue.PeerTaskQueue

	// FIXME it's a bit odd for the client and the worker to both share memory
	// (both modify the peerRequestQueue) and also to communicate over the
@@ -100,7 +103,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
	e := &Engine{
		ledgerMap:        make(map[peer.ID]*ledger),
		bs:               bs,
-		peerRequestQueue: newPRQ(),
+		peerRequestQueue: peertaskqueue.New(),
		outbox:           make(chan (<-chan *Envelope), outboxChanBuffer),
		workSignal:       make(chan struct{}, 1),
		ticker:           time.NewTicker(time.Millisecond * 100),
@@ -159,23 +162,23 @@ func (e *Engine) taskWorker(ctx context.Context) {
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
-		nextTask := e.peerRequestQueue.Pop()
+		nextTask := e.peerRequestQueue.PopBlock()
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
-				nextTask = e.peerRequestQueue.Pop()
+				nextTask = e.peerRequestQueue.PopBlock()
			case <-e.ticker.C:
-				e.peerRequestQueue.thawRound()
-				nextTask = e.peerRequestQueue.Pop()
+				e.peerRequestQueue.ThawRound()
+				nextTask = e.peerRequestQueue.PopBlock()
			}
		}

		// with a task in hand, we're ready to prepare the envelope...
		msg := bsmsg.New(true)
-		for _, entry := range nextTask.Entries {
-			block, err := e.bs.Get(entry.Cid)
+		for _, entry := range nextTask.Tasks {
+			block, err := e.bs.Get(entry.Identifier.(cid.Cid))
			if err != nil {
				log.Errorf("tried to execute a task and errored fetching block: %s", err)
				continue
@@ -186,15 +189,15 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
		if msg.Empty() {
			// If we don't have the block, don't hold that against the peer
			// make sure to update that the task has been 'completed'
-			nextTask.Done(nextTask.Entries)
+			nextTask.Done(nextTask.Tasks)
			continue
		}

		return &Envelope{
			Peer:    nextTask.Target,
			Message: msg,
			Sent: func() {
-				nextTask.Done(nextTask.Entries)
+				nextTask.Done(nextTask.Tasks)
				select {
				case e.workSignal <- struct{}{}:
					// work completing may mean that our queue will provide new
@@ -246,7 +249,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) {
	}

	var msgSize int
-	var activeEntries []wl.Entry
+	var activeEntries []peertask.Task
	for _, entry := range m.Wantlist() {
		if entry.Cancel {
			log.Debugf("%s cancel %s", p, entry.Cid)
@@ -265,17 +268,17 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) {
				// we have the block
				newWorkExists = true
				if msgSize+blockSize > maxMessageSize {
-					e.peerRequestQueue.Push(p, activeEntries...)
-					activeEntries = []wl.Entry{}
+					e.peerRequestQueue.PushBlock(p, activeEntries...)
+					activeEntries = []peertask.Task{}
					msgSize = 0
				}
-				activeEntries = append(activeEntries, entry.Entry)
+				activeEntries = append(activeEntries, peertask.Task{Identifier: entry.Cid, Priority: entry.Priority})
				msgSize += blockSize
			}
		}
	}
	if len(activeEntries) > 0 {
-		e.peerRequestQueue.Push(p, activeEntries...)
+		e.peerRequestQueue.PushBlock(p, activeEntries...)
	}
	for _, block := range m.Blocks() {
		log.Debugf("got block %s %d bytes", block, len(block.RawData()))
@@ -289,7 +292,10 @@ func (e *Engine) addBlock(block blocks.Block) {
	for _, l := range e.ledgerMap {
		l.lk.Lock()
		if entry, ok := l.WantListContains(block.Cid()); ok {
-			e.peerRequestQueue.Push(l.Partner, entry)
+			e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{
+				Identifier: entry.Cid,
+				Priority:   entry.Priority,
+			})
			work = true
		}
		l.lk.Unlock()