This repository was archived by the owner on Sep 20, 2022. It is now read-only.
forked from dedis/onet
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlocal.go
500 lines (448 loc) · 14.1 KB
/
local.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
package onet
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
"go.dedis.ch/onet/v3/ciphersuite"
"go.dedis.ch/onet/v3/log"
"go.dedis.ch/onet/v3/network"
"golang.org/x/xerrors"
)
// LeakyTestCheck represents an enum to indicate how deep CloseAll needs to
// check the tests.
type LeakyTestCheck int

const (
	// CheckNone will make CloseAll not check anything.
	//
	// iota + 1 so that the zero value of LeakyTestCheck is not a valid
	// check level and must be set explicitly.
	CheckNone LeakyTestCheck = iota + 1
	// CheckGoroutines will only check for leaking goroutines.
	CheckGoroutines
	// CheckAll will also check for leaking Overlay.Processors and
	// ProtocolInstances.
	CheckAll
)
// TestClose interface allows a service to clean up for the tests. It will only
// be called when a test calls `LocalTest.CloseAll()`.
type TestClose interface {
	// TestClose can clean up things needed in the service.
	TestClose()
}
// LocalTest represents all that is needed for a local test-run
type LocalTest struct {
	// builder creates the servers; it is a clone of the builder given to
	// NewLocalTest so the caller's builder stays untouched.
	builder Builder
	// A map of ServerIdentity.Id to Servers
	Servers map[network.ServerIdentityID]*Server
	// A map of ServerIdentity.Id to Overlays
	Overlays map[network.ServerIdentityID]*Overlay
	// A map of ServerIdentity.Id to Services
	Services map[network.ServerIdentityID]map[string]Service
	// A map of Tree.Id to Trees
	Trees map[TreeID]*Tree
	// All single nodes
	Nodes []*TreeNodeInstance
	// How carefully to check for leaking resources at the end of the test.
	Check LeakyTestCheck
	// path is the temporary directory holding the servers' db-files; it is
	// removed by CloseAll.
	path string
	// Once closed is set, do not allow further operations on it,
	// since now the temp directory is gone.
	closed bool
	// T, when set, lets CloseAll report leaks through the test framework
	// instead of the global logger.
	T *testing.T
	// keep the latestPort used so that we can add nodes later
	latestPort int
}
const (
	// TCP represents the TCP mode of networking for this local test
	TCP = "tcp"
	// Local represents the Local mode of networking for this local test
	Local = "local"
)
// NewLocalTest creates a new Local handler that can be used to test protocols
// locally. It allocates a temporary directory for the servers' db-files and
// derives the base port from the builder's identity; both failures are fatal
// since no test can run without them.
func NewLocalTest(builder Builder) *LocalTest {
	tmpDir, err := ioutil.TempDir("", "onet")
	if err != nil {
		log.Fatal("could not create temp directory: ", err)
	}

	ident := builder.Identity()
	basePort, err := strconv.Atoi(ident.Address.Port())
	if err != nil {
		log.Fatal("could not parse the base port: ", err)
	}

	lt := &LocalTest{
		builder:    builder.Clone(),
		path:       tmpDir,
		latestPort: basePort,
		Check:      CheckAll,
	}
	lt.Servers = make(map[network.ServerIdentityID]*Server)
	lt.Overlays = make(map[network.ServerIdentityID]*Overlay)
	lt.Services = make(map[network.ServerIdentityID]map[string]Service)
	lt.Trees = make(map[TreeID]*Tree)
	lt.Nodes = make([]*TreeNodeInstance, 0, 1)
	return lt
}
// StartProtocol takes a name and a tree and will create a
// new Node with the protocol 'name' running from the tree-root.
func (l *LocalTest) StartProtocol(name string, t *Tree) (ProtocolInstance, error) {
	l.panicClosed()
	rootID := t.Root.ServerIdentity.ID
	for _, srv := range l.Servers {
		if !srv.ServerIdentity.ID.Equal(rootID) {
			continue
		}
		// XXX do we really need multiples overlays ? Can't we just use the
		// Node, since it is already dispatched as like a TreeNode ?
		pi, err := l.Overlays[srv.ServerIdentity.ID].StartProtocol(name, t, NilServiceID)
		if err != nil {
			return nil, xerrors.Errorf("creating protocol: %v", err)
		}
		return pi, nil
	}
	return nil, xerrors.New("Didn't find server for tree-root")
}
// CreateProtocol takes a name and a tree and will create a
// new Node with the protocol 'name' without running it.
func (l *LocalTest) CreateProtocol(name string, t *Tree) (ProtocolInstance, error) {
	l.panicClosed()
	rootID := t.Root.ServerIdentity.ID
	for _, srv := range l.Servers {
		if !srv.ServerIdentity.ID.Equal(rootID) {
			continue
		}
		// XXX do we really need multiples overlays ? Can't we just use the
		// Node, since it is already dispatched as like a TreeNode ?
		pi, err := l.Overlays[srv.ServerIdentity.ID].CreateProtocol(name, t, NilServiceID)
		if err != nil {
			return nil, xerrors.Errorf("creating protocol: %v", err)
		}
		return pi, nil
	}
	return nil, xerrors.New("Didn't find server for tree-root")
}
// GenServers returns n Servers with a localRouter, registering each server's
// overlay and services in the LocalTest's lookup maps.
func (l *LocalTest) GenServers(n int) []*Server {
	l.panicClosed()
	servers := l.genLocalHosts(n)
	for _, srv := range servers {
		srv.ServerIdentity.SetPrivate(srv.secretKey.Raw())
		id := srv.ServerIdentity.ID
		l.Servers[id] = srv
		l.Overlays[id] = srv.overlay
		l.Services[id] = srv.serviceManager.services
	}
	return servers
}
// GenTree will create a tree of n servers with a localRouter, and returns the
// list of servers and the associated roster / tree. When register is true,
// the tree is also registered with the first server's overlay.
func (l *LocalTest) GenTree(n int, register bool) ([]*Server, *Roster, *Tree) {
	l.panicClosed()

	servers := l.GenServers(n)
	roster := l.GenRosterFromHost(servers...)

	tree := roster.GenerateBinaryTree()
	l.Trees[tree.ID] = tree
	if register {
		servers[0].overlay.RegisterTree(tree)
	}
	return servers, roster, tree
}
// GenBigTree will create a tree of n servers.
// If register is true, the Roster and Tree will be registered with the overlay.
// 'nbrServers' is how many servers are created
// 'nbrTreeNodes' is how many TreeNodes are created
// nbrServers can be smaller than nbrTreeNodes, in which case a given server will
// be used more than once in the tree.
func (l *LocalTest) GenBigTree(nbrTreeNodes, nbrServers, bf int, register bool) ([]*Server, *Roster, *Tree) {
	l.panicClosed()

	servers := l.GenServers(nbrServers)
	roster := l.GenRosterFromHost(servers...)

	tree := roster.GenerateBigNaryTree(bf, nbrTreeNodes)
	l.Trees[tree.ID] = tree
	if register {
		servers[0].overlay.RegisterTree(tree)
	}
	return servers, roster, tree
}
// GenRosterFromHost takes a number of servers as arguments and creates
// an Roster.
func (l *LocalTest) GenRosterFromHost(servers ...*Server) *Roster {
	l.panicClosed()
	entities := make([]*network.ServerIdentity, 0, len(servers))
	for _, srv := range servers {
		entities = append(entities, srv.ServerIdentity)
	}
	return NewRoster(entities)
}
// panicClosed aborts the current operation when the LocalTest has already
// been torn down by CloseAll, since the temp directory is gone by then.
func (l *LocalTest) panicClosed() {
	if !l.closed {
		return
	}
	panic("attempt to use LocalTest after CloseAll")
}
// WaitDone loops until all protocolInstances are done or
// the timeout is reached. If all protocolInstances are closed
// within the timeout, nil is returned.
//
// It polls up to 10 times, sleeping t/10 between attempts, and on failure
// reports every lingering ProtocolInstance and dispatcher routine found in
// the last attempt.
func (l *LocalTest) WaitDone(t time.Duration) error {
	var lingering []string
	for attempt := 0; attempt < 10; attempt++ {
		lingering = []string{}
		// Collect every protocol instance still registered on any overlay.
		for _, o := range l.Overlays {
			o.instancesLock.Lock()
			for id, pi := range o.protocolInstances {
				lingering = append(lingering,
					fmt.Sprintf("ProtocolInstance type %T on %s with id %s",
						pi, o.ServerIdentity(), id))
			}
			o.instancesLock.Unlock()
		}
		// Collect dispatcher routines that are still running.
		for _, srv := range l.Servers {
			if disp, ok := srv.serviceManager.Dispatcher.(*network.RoutineDispatcher); ok && disp.GetRoutines() > 0 {
				lingering = append(lingering,
					fmt.Sprintf("RoutineDispatcher has %v routines running on %s",
						disp.GetRoutines(), srv.ServerIdentity))
			}
		}
		if len(lingering) == 0 {
			return nil
		}
		time.Sleep(t / 10)
	}
	return xerrors.New("still have things lingering: " + strings.Join(lingering, "\n"))
}
// CloseAll closes all the servers.
//
// It is meant to be deferred at the end of a test: it runs the services'
// TestClose hooks, waits for protocol instances and dispatcher routines to
// finish (reporting leftovers according to l.Check), closes every server,
// removes the temporary db directory, and marks the LocalTest as closed so
// any further use panics.
func (l *LocalTest) CloseAll() {
	log.Lvl3("Stopping all")
	if r := recover(); r != nil {
		// Make sure that a panic is correctly caught, as CloseAll is most often
		// called in a `defer` statement, and we don't want to show leaking
		// go-routines or hanging protocolInstances if a panic occurs.
		panic(r)
	}
	// If the test already failed, skip the cleanup checks so the original
	// failure stays visible.
	if l.T != nil && l.T.Failed() {
		return
	}
	InformAllServersStopped()
	// If the debug-level is 0, we copy all errors to a buffer that
	// will be discarded at the end.
	if log.DebugVisible() == 0 {
		log.OutputToBuf()
	}
	// Run every service's TestClose hook in parallel and wait for all of
	// them before checking for leftovers.
	var wg sync.WaitGroup
	for _, srv := range l.Servers {
		wg.Add(1)
		go func(s *Server) {
			s.callTestClose()
			wg.Done()
		}(srv)
	}
	wg.Wait()
	// Give lingering protocol instances / dispatcher routines up to 5s to
	// terminate; how a leftover is reported depends on l.Check.
	if err := l.WaitDone(5 * time.Second); err != nil {
		switch l.Check {
		case CheckNone:
			// Ignore waitDone
		case CheckGoroutines:
			// Only print a warning
			if l.T != nil {
				l.T.Log("Warning:", err)
			} else {
				log.Warn("Warning:", err)
			}
		case CheckAll:
			// Fail if there are leaking processes or protocolInstances
			if l.T != nil {
				l.T.Fatal(err.Error())
			} else {
				log.Fatal(err.Error())
			}
		}
	}
	// Close the stand-alone TreeNodeInstances created via NewTreeNodeInstance.
	for _, node := range l.Nodes {
		log.Lvl3("Closing node", node)
		err := node.closeDispatch()
		if err != nil {
			log.Error("Error while closing dispatcher:", err)
		}
	}
	l.Nodes = make([]*TreeNodeInstance, 0)
	// Close every server in parallel, waiting until each one has actually
	// stopped listening before continuing.
	sd := sync.WaitGroup{}
	for _, srv := range l.Servers {
		sd.Add(1)
		go func(server *Server) {
			if server.Closed() {
				// Server has already been closed previously so we skip.
				sd.Done()
				return
			}
			log.Lvl3("Closing server", server.ServerIdentity.Address)
			err := server.Close()
			if err != nil {
				log.Error("Closing server", server.ServerIdentity.Address,
					"gives error", err)
			}
			for server.Listening() {
				log.Lvl1("Sleeping while waiting to close...")
				time.Sleep(10 * time.Millisecond)
			}
			sd.Done()
		}(srv)
	}
	sd.Wait()
	l.Servers = map[network.ServerIdentityID]*Server{}
	// Remove the temporary directory holding the db-files; after this the
	// LocalTest must not be used anymore.
	err := os.RemoveAll(l.path)
	if err != nil {
		log.Error("Error while removing all db-files:", err)
	}
	l.closed = true
	if log.DebugVisible() == 0 {
		log.OutputToOs()
	}
	if l.Check != CheckNone {
		log.AfterTest(nil)
	}
}
// getTree returns the tree of the given TreeNode, or nil when the node
// belongs to none of the registered trees.
func (l *LocalTest) getTree(tn *TreeNode) *Tree {
	l.panicClosed()
	for _, candidate := range l.Trees {
		if tn.IsInTree(candidate) {
			return candidate
		}
	}
	return nil
}
// NewTreeNodeInstance creates a new node on a TreeNode. It fails when the
// TreeNode's server has no overlay registered, when the node belongs to no
// known tree, or when the protocol is unknown to the server.
func (l *LocalTest) NewTreeNodeInstance(tn *TreeNode, protName string) (*TreeNodeInstance, error) {
	l.panicClosed()

	overlay := l.Overlays[tn.ServerIdentity.ID]
	if overlay == nil {
		return nil, xerrors.New("Didn't find corresponding overlay")
	}

	tree := l.getTree(tn)
	if tree == nil {
		return nil, xerrors.New("Didn't find tree corresponding to TreeNode")
	}

	if !l.Servers[tn.ServerIdentity.ID].protocols.ProtocolExists(ProtocolNameToID(protName)) {
		return nil, xerrors.New("Didn't find protocol: " + protName)
	}

	tok := &Token{
		TreeID:     tree.ID,
		TreeNodeID: tn.ID,
	}
	protoIO := overlay.protoIO.getByName(protName)
	node := newTreeNodeInstance(overlay, tok, tn, protoIO)
	// Track the node so CloseAll can shut it down.
	l.Nodes = append(l.Nodes, node)
	return node, nil
}
// GetTreeNodeInstances returns all TreeNodeInstances that belong to a server.
func (l *LocalTest) GetTreeNodeInstances(id network.ServerIdentityID) []*TreeNodeInstance {
	l.panicClosed()
	var instances []*TreeNodeInstance
	for _, inst := range l.Overlays[id].instances {
		instances = append(instances, inst)
	}
	return instances
}
// sendTreeNode injects a message directly in the Overlay-layer, bypassing
// Host and Network. Sender and receiver must be part of the same tree.
func (l *LocalTest) sendTreeNode(proto string, from, to *TreeNodeInstance, msg network.Message) error {
	l.panicClosed()

	fromTree, toTree := from.Tree(), to.Tree()
	if fromTree == nil || toTree == nil {
		return xerrors.New("cannot find tree")
	}
	if !fromTree.ID.Equal(toTree.ID) {
		return xerrors.New("Can't send from one tree to another")
	}

	onetMsg := &ProtocolMsg{
		Msg:     msg,
		MsgType: network.MessageType(msg),
		From:    from.token,
		To:      to.token,
	}
	protoIO := l.Overlays[to.ServerIdentity().ID].protoIO.getByName(proto)
	if err := to.overlay.TransmitMsg(onetMsg, protoIO); err != nil {
		return xerrors.Errorf("transmitting message: %v", err)
	}
	return nil
}
// addPendingTreeMarshal takes a treeMarshal and adds it to the list of the
// known trees, also triggering dispatching of onet-messages waiting for that
// tree. It simply delegates to the server's overlay.
func (l *LocalTest) addPendingTreeMarshal(c *Server, tm *TreeMarshal) {
	l.panicClosed()
	c.overlay.addPendingTreeMarshal(tm)
}
// checkPendingTreeMarshal looks whether there are any treeMarshals to be
// called. It simply delegates to the server's overlay.
func (l *LocalTest) checkPendingTreeMarshal(c *Server, el *Roster) {
	l.panicClosed()
	c.overlay.checkPendingTreeMarshal(el)
}
// GetPrivate returns the private key of a server.
//
// NOTE(review): unlike most LocalTest methods, this does not call
// panicClosed — presumably deliberate so keys stay readable after CloseAll;
// confirm before changing.
func (l *LocalTest) GetPrivate(c *Server) ciphersuite.SecretKey {
	return c.secretKey
}
// GetServices returns a slice of all services asked for.
// The sid is the id of the service that will be collected.
// Servers without that service contribute a nil entry.
func (l *LocalTest) GetServices(servers []*Server, name string) []Service {
	result := make([]Service, len(servers))
	for i := range servers {
		result[i] = l.Services[servers[i].ServerIdentity.ID][name]
	}
	return result
}
// MakeSRS creates and returns nbr Servers, the associated Roster and the
// Service object of the first server in the list having sid as a ServiceID.
func (l *LocalTest) MakeSRS(nbr int, name string) ([]*Server, *Roster, Service) {
	l.panicClosed()
	servers := l.GenServers(nbr)
	roster := l.GenRosterFromHost(servers...)
	firstService := l.Services[servers[0].ServerIdentity.ID][name]
	return servers, roster, firstService
}
// NewClient returns *Client for which the types depend on the mode of the
// LocalContext. Local mode has no real network, so no client can be built.
func (l *LocalTest) NewClient(serviceName string) *Client {
	switch l.builder.(type) {
	case *LocalBuilder:
		log.Fatal("Can't make local client")
		return nil
	default:
		return NewClient(serviceName)
	}
}
// NewClientKeep returns *Client for which the types depend on the mode of the
// LocalContext, the connection is not closed after sending requests.
// Local mode has no real network, so no client can be built.
func (l *LocalTest) NewClientKeep(serviceName string) *Client {
	switch l.builder.(type) {
	case *LocalBuilder:
		log.Fatal("Can't make local client")
		return nil
	default:
		return NewClientKeep(serviceName)
	}
}
// genLocalHosts returns n servers created with a localRouter.
func (l *LocalTest) genLocalHosts(n int) []*Server {
	l.panicClosed()
	servers := make([]*Server, n)
	for i := range servers {
		servers[i] = l.NewServer(l.latestPort)
		if l.latestPort > 0 {
			// When the port is defined, latestPort needs to be
			// increased to not conflict for future servers.
			l.latestPort += 10
		}
	}
	return servers
}
// NewServer returns a new server which type is determined by the local mode:
// TCP or Local. If it's TCP, then an available port is used, otherwise, the
// port given in argument is used. The server is started in the background
// and registered in the LocalTest's lookup maps.
func (l *LocalTest) NewServer(port int) *Server {
	l.panicClosed()

	l.builder.SetPort(port)
	l.builder.SetDbPath(l.path)
	srv := l.builder.Build()
	srv.StartInBackground()

	id := srv.ServerIdentity.ID
	l.Servers[id] = srv
	l.Overlays[id] = srv.overlay
	l.Services[id] = srv.serviceManager.services
	return srv
}