Commit 40ab188

Merge pull request #205 from jbenet/fix/move_proto_dht

redux: refactor(dht/pb) move proto to pb package

2 parents 2ce4187 + 0dba976
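
The change itself is mechanical: the DHT's protobuf Message type and its helpers now live in a dedicated pb package (github.com/jbenet/go-ipfs/routing/dht/pb), and call sites reference them through a pb import alias instead of the old package-local names. A minimal sketch of the resulting call shape, using only identifiers that appear in the diffs below; the buildAddProvider wrapper itself is hypothetical:

package dht

import (
	peer "github.com/jbenet/go-ipfs/peer"
	pb "github.com/jbenet/go-ipfs/routing/dht/pb"
)

// buildAddProvider is a hypothetical wrapper illustrating the refactored call
// shape from dht.go: messages are constructed with pb.NewMessage and peer
// lists are converted with pb.PeersToPBPeers, both exported from the new pb
// package instead of the old package-local newMessage/peersToPBPeers.
func buildAddProvider(self peer.Peer, key string) *pb.Message {
	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, key, 0)
	pmes.ProviderPeers = pb.PeersToPBPeers([]peer.Peer{self})
	return pmes
}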

File tree

10 files changed, +87 -85 lines changed


exchange/bitswap/strategy/strategy.go

+1 -2

@@ -5,11 +5,10 @@ import (
 	"sync"
 
 	bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
-	"github.com/jbenet/go-ipfs/peer"
+	peer "github.com/jbenet/go-ipfs/peer"
 	u "github.com/jbenet/go-ipfs/util"
 )
 
-// TODO declare thread-safe datastore
 // TODO niceness should be on a per-peer basis. Use-case: Certain peers are
 // "trusted" and/or controlled by a single human user. The user may want for
 // these peers to exchange data freely

routing/dht/Makefile

-11
This file was deleted.

routing/dht/dht.go

+21 -20

@@ -11,6 +11,7 @@ import (
 	inet "github.com/jbenet/go-ipfs/net"
 	msg "github.com/jbenet/go-ipfs/net/message"
 	peer "github.com/jbenet/go-ipfs/peer"
+	pb "github.com/jbenet/go-ipfs/routing/dht/pb"
 	kb "github.com/jbenet/go-ipfs/routing/kbucket"
 	u "github.com/jbenet/go-ipfs/util"
 
@@ -128,7 +129,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 	}
 
 	// deserialize msg
-	pmes := new(Message)
+	pmes := new(pb.Message)
 	err := proto.Unmarshal(mData, pmes)
 	if err != nil {
 		log.Error("Error unmarshaling data")
@@ -140,7 +141,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 
 	// Print out diagnostic
 	log.Debugf("%s got message type: '%s' from %s",
-		dht.self, Message_MessageType_name[int32(pmes.GetType())], mPeer)
+		dht.self, pb.Message_MessageType_name[int32(pmes.GetType())], mPeer)
 
 	// get handler for this msg type.
 	handler := dht.handlerForMsgType(pmes.GetType())
@@ -174,7 +175,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 
 // sendRequest sends out a request using dht.sender, but also makes sure to
 // measure the RTT for latency measurements.
-func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
 
 	mes, err := msg.FromObject(p, pmes)
 	if err != nil {
@@ -185,7 +186,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message)
 
 	// Print out diagnostic
 	log.Debugf("Sent message type: '%s' to %s",
-		Message_MessageType_name[int32(pmes.GetType())], p)
+		pb.Message_MessageType_name[int32(pmes.GetType())], p)
 
 	rmes, err := dht.sender.SendRequest(ctx, mes)
 	if err != nil {
@@ -198,7 +199,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message)
 	rtt := time.Since(start)
 	rmes.Peer().SetLatency(rtt)
 
-	rpmes := new(Message)
+	rpmes := new(pb.Message)
 	if err := proto.Unmarshal(rmes.Data(), rpmes); err != nil {
 		return nil, err
 	}
@@ -210,7 +211,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message)
 func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer,
 	key string, value []byte) error {
 
-	pmes := newMessage(Message_PUT_VALUE, string(key), 0)
+	pmes := pb.NewMessage(pb.Message_PUT_VALUE, string(key), 0)
 	pmes.Value = value
 	rpmes, err := dht.sendRequest(ctx, p, pmes)
 	if err != nil {
@@ -225,10 +226,10 @@ func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer,
 
 func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) error {
 
-	pmes := newMessage(Message_ADD_PROVIDER, string(key), 0)
+	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, string(key), 0)
 
 	// add self as the provider
-	pmes.ProviderPeers = peersToPBPeers([]peer.Peer{dht.self})
+	pmes.ProviderPeers = pb.PeersToPBPeers([]peer.Peer{dht.self})
 
 	rpmes, err := dht.sendRequest(ctx, p, pmes)
 	if err != nil {
@@ -290,9 +291,9 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
 
 // getValueSingle simply performs the get value RPC with the given parameters
 func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.Peer,
-	key u.Key, level int) (*Message, error) {
+	key u.Key, level int) (*pb.Message, error) {
 
-	pmes := newMessage(Message_GET_VALUE, string(key), level)
+	pmes := pb.NewMessage(pb.Message_GET_VALUE, string(key), level)
 	return dht.sendRequest(ctx, p, pmes)
 }
 
@@ -301,7 +302,7 @@ func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.Peer,
 // one to get the value from? Or just connect to one at a time until we get a
 // successful connection and request the value from it?
 func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key,
-	peerlist []*Message_Peer, level int) ([]byte, error) {
+	peerlist []*pb.Message_Peer, level int) ([]byte, error) {
 
 	for _, pinfo := range peerlist {
 		p, err := dht.ensureConnectedToPeer(pinfo)
@@ -379,17 +380,17 @@ func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.Peer, *kb.RoutingTable) {
 	return nil, nil
 }
 
-func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.Peer, id peer.ID, level int) (*Message, error) {
-	pmes := newMessage(Message_FIND_NODE, string(id), level)
+func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.Peer, id peer.ID, level int) (*pb.Message, error) {
+	pmes := pb.NewMessage(pb.Message_FIND_NODE, string(id), level)
 	return dht.sendRequest(ctx, p, pmes)
 }
 
-func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.Peer, key u.Key, level int) (*Message, error) {
-	pmes := newMessage(Message_GET_PROVIDERS, string(key), level)
+func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.Peer, key u.Key, level int) (*pb.Message, error) {
+	pmes := pb.NewMessage(pb.Message_GET_PROVIDERS, string(key), level)
 	return dht.sendRequest(ctx, p, pmes)
 }
 
-func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []peer.Peer {
+func (dht *IpfsDHT) addProviders(key u.Key, peers []*pb.Message_Peer) []peer.Peer {
 	var provArr []peer.Peer
 	for _, prov := range peers {
 		p, err := dht.peerFromInfo(prov)
@@ -413,7 +414,7 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []peer.Peer {
 }
 
 // nearestPeersToQuery returns the routing tables closest peers.
-func (dht *IpfsDHT) nearestPeersToQuery(pmes *Message, count int) []peer.Peer {
+func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
 	level := pmes.GetClusterLevel()
 	cluster := dht.routingTables[level]
 
@@ -423,7 +424,7 @@ func (dht *IpfsDHT) nearestPeersToQuery(pmes *Message, count int) []peer.Peer {
 }
 
 // betterPeerToQuery returns nearestPeersToQuery, but iff closer than self.
-func (dht *IpfsDHT) betterPeersToQuery(pmes *Message, count int) []peer.Peer {
+func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
 	closer := dht.nearestPeersToQuery(pmes, count)
 
 	// no node? nil
@@ -462,7 +463,7 @@ func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) {
 	return p, nil
 }
 
-func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (peer.Peer, error) {
+func (dht *IpfsDHT) peerFromInfo(pbp *pb.Message_Peer) (peer.Peer, error) {
 
 	id := peer.ID(pbp.GetId())
 
@@ -485,7 +486,7 @@ func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (peer.Peer, error) {
 	return p, nil
 }
 
-func (dht *IpfsDHT) ensureConnectedToPeer(pbp *Message_Peer) (peer.Peer, error) {
+func (dht *IpfsDHT) ensureConnectedToPeer(pbp *pb.Message_Peer) (peer.Peer, error) {
 	p, err := dht.peerFromInfo(pbp)
 	if err != nil {
 		return nil, err
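
Note how the sendRequest hunks above keep the flow intact: the request is wrapped with msg.FromObject, sent through dht.sender.SendRequest, the round-trip time is recorded on the remote peer, and the reply bytes are decoded into a fresh pb.Message. A minimal sketch of just the decode step, assuming the same proto and pb imports dht.go already uses; decodeReply itself is hypothetical:

// decodeReply is a hypothetical helper mirroring what sendRequest does with
// the reply bytes after the refactor: unmarshal them into a pb.Message from
// the new pb package and surface any protobuf error to the caller.
func decodeReply(data []byte) (*pb.Message, error) {
	rpmes := new(pb.Message)
	if err := proto.Unmarshal(data, rpmes); err != nil {
		return nil, err
	}
	return rpmes, nil
}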

routing/dht/ext_test.go

+14 -13

@@ -12,6 +12,7 @@ import (
 	msg "github.com/jbenet/go-ipfs/net/message"
 	mux "github.com/jbenet/go-ipfs/net/mux"
 	peer "github.com/jbenet/go-ipfs/peer"
+	pb "github.com/jbenet/go-ipfs/routing/dht/pb"
 	u "github.com/jbenet/go-ipfs/util"
 
 	"time"
@@ -127,13 +128,13 @@ func TestGetFailures(t *testing.T) {
 	// u.POut("NotFound Test\n")
 	// Reply with failures to every message
 	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
-		pmes := new(Message)
+		pmes := new(pb.Message)
 		err := proto.Unmarshal(mes.Data(), pmes)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		resp := &Message{
+		resp := &pb.Message{
 			Type: pmes.Type,
 		}
 		m, err := msg.FromObject(mes.Peer(), resp)
@@ -153,9 +154,9 @@ func TestGetFailures(t *testing.T) {
 
 	fs.handlers = nil
 	// Now we test this DHT's handleGetValue failure
-	typ := Message_GET_VALUE
+	typ := pb.Message_GET_VALUE
 	str := "hello"
-	req := Message{
+	req := pb.Message{
 		Type: &typ,
 		Key: &str,
 		Value: []byte{0},
@@ -169,7 +170,7 @@ func TestGetFailures(t *testing.T) {
 
 	mes = d.HandleMessage(ctx, mes)
 
-	pmes := new(Message)
+	pmes := new(pb.Message)
 	err = proto.Unmarshal(mes.Data(), pmes)
 	if err != nil {
 		t.Fatal(err)
@@ -215,21 +216,21 @@ func TestNotFound(t *testing.T) {
 
 	// Reply with random peers to every message
 	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
-		pmes := new(Message)
+		pmes := new(pb.Message)
 		err := proto.Unmarshal(mes.Data(), pmes)
 		if err != nil {
 			t.Fatal(err)
 		}
 
 		switch pmes.GetType() {
-		case Message_GET_VALUE:
-			resp := &Message{Type: pmes.Type}
+		case pb.Message_GET_VALUE:
+			resp := &pb.Message{Type: pmes.Type}
 
 			peers := []peer.Peer{}
 			for i := 0; i < 7; i++ {
 				peers = append(peers, _randPeer())
 			}
-			resp.CloserPeers = peersToPBPeers(peers)
+			resp.CloserPeers = pb.PeersToPBPeers(peers)
 			mes, err := msg.FromObject(mes.Peer(), resp)
 			if err != nil {
 				t.Error(err)
@@ -282,17 +283,17 @@ func TestLessThanKResponses(t *testing.T) {
 
 	// Reply with random peers to every message
 	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
-		pmes := new(Message)
+		pmes := new(pb.Message)
 		err := proto.Unmarshal(mes.Data(), pmes)
 		if err != nil {
 			t.Fatal(err)
 		}
 
 		switch pmes.GetType() {
-		case Message_GET_VALUE:
-			resp := &Message{
+		case pb.Message_GET_VALUE:
+			resp := &pb.Message{
 				Type: pmes.Type,
-				CloserPeers: peersToPBPeers([]peer.Peer{other}),
+				CloserPeers: pb.PeersToPBPeers([]peer.Peer{other}),
 			}
 
 			mes, err := msg.FromObject(mes.Peer(), resp)
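
The test updates follow the same pattern: every fake-sender handler decodes the incoming request into a pb.Message and answers with another pb.Message (empty, with random closer peers, or with fewer than K peers, depending on the test). Condensed into a standalone helper, and assuming the msg, pb, proto, and testing imports already present in ext_test.go; failHandler itself is hypothetical:

// failHandler is a hypothetical standalone version of the inline handlers in
// ext_test.go: decode the request, reply with an empty pb.Message of the same
// type, and wrap the reply back into a net message addressed to the sender.
func failHandler(t *testing.T, mes msg.NetMessage) msg.NetMessage {
	pmes := new(pb.Message)
	if err := proto.Unmarshal(mes.Data(), pmes); err != nil {
		t.Fatal(err)
	}

	resp := &pb.Message{Type: pmes.Type} // same type back, no value, no peers
	out, err := msg.FromObject(mes.Peer(), resp)
	if err != nil {
		t.Fatal(err)
	}
	return out
}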
