summaryrefslogtreecommitdiff
path: root/test/framework/fixtures/libgoalFixture.go
blob: 3b54f9f7aa8f234938952b0aac7c42294ecf57ac (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
// Copyright (C) 2019-2023 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.

package fixtures

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/algorand/go-deadlock"
	"github.com/stretchr/testify/require"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/crypto/merklearray"
	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
	"github.com/algorand/go-algorand/data/account"
	"github.com/algorand/go-algorand/gen"
	"github.com/algorand/go-algorand/libgoal"
	"github.com/algorand/go-algorand/netdeploy"
	"github.com/algorand/go-algorand/nodecontrol"
	"github.com/algorand/go-algorand/protocol"
	"github.com/algorand/go-algorand/test/e2e-go/globals"
	"github.com/algorand/go-algorand/util/db"
)

// LibGoalFixture is a test fixture for tests requiring a running node with a algod and kmd clients
type LibGoalFixture struct {
	baseFixture

	// LibGoalClient is bound to the network's primary node; populated by Start().
	LibGoalClient  libgoal.Client
	// NC controls the primary node; its KMD data dir is set by Start().
	NC             nodecontrol.NodeController
	// rootDir is the per-test root directory (testDir/testName), wiped by setup().
	rootDir        string
	Name           string
	// network is the private network deployed from the template file.
	network        netdeploy.Network
	// t is the currently-active test, guarded by tMu; nil between tests when the
	// fixture is shared (see SetTestContext).
	t              TestingTB
	tMu            deadlock.RWMutex
	// clientPartKeys maps a client's data directory to the participation-only
	// keys discovered for it (see importRootKeys / GetParticipationOnlyAccounts).
	clientPartKeys map[string][]account.Participation
	// consensus optionally overrides consensus protocol settings; it is passed
	// to CreateNetworkFromTemplate before any node starts.
	consensus      config.ConsensusProtocols
}

// SetConsensus applies a new consensus settings which would get deployed before
// any of the nodes starts.
// NOTE(review): the receiver is RestClientFixture (declared elsewhere in this
// package), not LibGoalFixture like the rest of this file — confirm intended.
func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) {
	f.consensus = consensus
}

// Setup is called to initialize the test fixture for the test(s),
// using the test's name as the network directory name, and starts the network.
func (f *LibGoalFixture) Setup(t TestingTB, templateFile string) {
	f.setup(t, t.Name(), templateFile, true)
}

// SetupNoStart is called to initialize the test fixture for the test(s)
// but does not start the network before returning.  Call NC.Start() to start later.
func (f *LibGoalFixture) SetupNoStart(t TestingTB, templateFile string) {
	f.setup(t, t.Name(), templateFile, false)
}

// SetupShared is called to initialize the test fixture that will be used for multiple tests.
// No TestingTB is bound yet; each test should bind its own via SetTestContext.
func (f *LibGoalFixture) SetupShared(testName string, templateFile string) {
	f.setup(nil, testName, templateFile, true)
}

// Genesis returns the genesis data for this fixture's deployed network.
func (f *LibGoalFixture) Genesis() gen.GenesisData {
	return f.network.Genesis()
}

// setup initializes the fixture: it wipes any leftover state under the
// per-test root directory, deploys a private network from the given template,
// and optionally starts it.
//
// test may be nil for shared fixtures (see SetupShared); testName becomes the
// directory name under the fixture's testDir; startNetwork controls whether
// f.Start() is invoked before returning.
func (f *LibGoalFixture) setup(test TestingTB, testName string, templateFile string, startNetwork bool) {
	// Call initialize for our base implementation
	f.initialize(f)
	f.t = SynchronizedTest(test)
	f.rootDir = filepath.Join(f.testDir, testName)

	// In case we're running tests against the same rootDir, purge it to avoid errors from already-exists
	os.RemoveAll(f.rootDir)
	templateFile = filepath.Join(f.testDataDir, templateFile)
	importKeys := false // Don't automatically import root keys when creating folders, we'll import on-demand
	file, err := os.Open(templateFile)
	f.failOnError(err, "Template file could not be opened: %v")
	// Close the template once the network has been created from it; the
	// original code leaked this file descriptor.
	defer file.Close()
	network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, file, f.binDir, importKeys, f.nodeExitWithError, f.consensus, false)
	f.failOnError(err, "CreateNetworkFromTemplate failed: %v")
	f.network = network

	if startNetwork {
		f.Start()
	}
}

// nodeExitWithError is a callback from the network indicating that the node exit with an error after a successful startup.
// i.e. node terminated, and not due to shutdown.. this is likely to be a crash/panic.
func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err error) {
	if err == nil {
		return
	}

	f.tMu.RLock()
	defer f.tMu.RUnlock()
	if f.t == nil {
		// No active test context to report the failure to.
		return
	}

	dataDir := nc.GetDataDir()
	f.t.Logf("Node at %s has terminated with an error: %v. Dumping logs...", dataDir, err)
	f.dumpLogs(filepath.Join(dataDir, "node.log"))

	// When the error carries a process exit status, include the exit code in
	// the failure message; otherwise fail with the generic message.
	if exitError, isExit := err.(*exec.ExitError); isExit {
		waitStatus := exitError.Sys().(syscall.WaitStatus)
		require.NoError(f.t, err, "Node at %s has terminated with error code %d", dataDir, waitStatus.ExitStatus())
		return
	}
	require.NoError(f.t, err, "Node at %s has terminated with an error", dataDir)
}

// importRootKeys scans the genesis key directory under dataDir and imports any
// root keys it finds into lg's default (unencrypted) wallet. Participation
// keys that have no matching root key on disk are recorded via
// addParticipationForClient so tests can retrieve them later
// (see GetParticipationOnlyAccounts).
// Failure to determine the genesis ID or read the key directory is treated as
// "nothing to import" and silently ignored.
func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) {
	genID, err := lg.GenesisID()
	if err != nil {
		return
	}

	keyDir := filepath.Join(dataDir, genID)
	files, err := os.ReadDir(keyDir)
	if err != nil {
		return
	}

	accountsWithRootKeys := make(map[string]bool)
	var allPartKeys []account.Participation

	// For each of these files
	for _, info := range files {
		var handle db.Accessor

		filename := info.Name()

		// If it isn't a key file we care about, skip it
		if config.IsRootKeyFilename(filename) {
			// Fetch a handle to this database
			handle, err = db.MakeAccessor(filepath.Join(keyDir, filename), false, false)
			if err != nil {
				// Couldn't open it, skip it
				continue
			}

			// Fetch an account.Root from the database
			root, err := account.RestoreRoot(handle)
			if err != nil {
				// Couldn't read it, skip it
				continue
			}

			secretKey := root.Secrets().SK
			wh, err := lg.GetUnencryptedWalletHandle()
			f.failOnError(err, "couldn't get default wallet handle: %v")
			_, err = lg.ImportKey(wh, secretKey[:])
			// Re-importing an already-known key is fine; any other import
			// failure is fatal to the test.
			if err != nil && !strings.Contains(err.Error(), "key already exists") {
				f.failOnError(err, "couldn't import secret: %v")
			}
			accountsWithRootKeys[root.Address().String()] = true
			handle.Close()
		} else if config.IsPartKeyFilename(filename) {
			// Fetch a handle to this database
			handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename))
			if err != nil {
				// Couldn't open it, skip it
				continue
			}

			// Fetch an account.Participation from the database
			participation, err := account.RestoreParticipation(handle)
			if err != nil {
				// Couldn't read it, skip it
				handle.Close()
				continue
			}

			// Early reject partkeys if we already have a rootkey for the account
			if !accountsWithRootKeys[participation.Address().String()] {
				allPartKeys = append(allPartKeys, participation.Participation)
			}

			// close the database handle.
			participation.Close()
		}
	}

	// Go through final set of non-filtered part keys and add the partkey-only keys to our collection.
	// The second membership check matters: a root key may have been discovered
	// *after* its partkey in the directory scan above.
	for _, part := range allPartKeys {
		if !accountsWithRootKeys[part.Address().String()] {
			f.addParticipationForClient(*lg, part)
		}
	}
}

// GetLibGoalClientFromNodeController returns the LibGoal Client for a given node controller,
// importing any on-disk root keys into the client's wallet.
func (f *LibGoalFixture) GetLibGoalClientFromNodeController(nc nodecontrol.NodeController) libgoal.Client {
	return f.GetLibGoalClientFromDataDir(nc.GetDataDir())
}

// GetLibGoalClientFromDataDir returns the LibGoal Client for a given data directory.
// The client is kmd-capable, and any root keys found on disk are imported into it.
func (f *LibGoalFixture) GetLibGoalClientFromDataDir(dataDir string) libgoal.Client {
	client, err := libgoal.MakeClientWithBinDir(f.binDir, dataDir, dataDir, libgoal.KmdClient)
	f.failOnError(err, "make libgoal client failed: %v")
	f.importRootKeys(&client, dataDir)
	return client
}

// GetLibGoalClientForNamedNode returns the LibGoal Client for a given named node.
// The client is kmd-capable, and any root keys found on disk are imported into it.
func (f *LibGoalFixture) GetLibGoalClientForNamedNode(nodeName string) libgoal.Client {
	nodeDir, err := f.network.GetNodeDir(nodeName)
	f.failOnError(err, "network.GetNodeDir failed: %v")
	client, err := libgoal.MakeClientWithBinDir(f.binDir, nodeDir, nodeDir, libgoal.KmdClient)
	f.failOnError(err, "make libgoal client failed: %v")
	f.importRootKeys(&client, nodeDir)
	return client
}

// GetLibGoalClientFromNodeControllerNoKeys returns the LibGoal Client for a given node controller,
// without importing any keys (algod-only client).
func (f *LibGoalFixture) GetLibGoalClientFromNodeControllerNoKeys(nc nodecontrol.NodeController) libgoal.Client {
	return f.GetLibGoalClientFromDataDirNoKeys(nc.GetDataDir())
}

// GetLibGoalClientFromDataDirNoKeys returns the LibGoal Client for a given data directory,
// without importing any keys (algod-only client, no kmd).
func (f *LibGoalFixture) GetLibGoalClientFromDataDirNoKeys(dataDir string) libgoal.Client {
	client, err := libgoal.MakeClientWithBinDir(f.binDir, dataDir, dataDir, libgoal.AlgodClient)
	f.failOnError(err, "make libgoal client failed: %v")
	return client
}

// GetLibGoalClientForNamedNodeNoKeys returns the LibGoal Client for a given named node,
// without importing any keys (algod-only client, no kmd).
func (f *LibGoalFixture) GetLibGoalClientForNamedNodeNoKeys(nodeName string) libgoal.Client {
	nodeDir, err := f.network.GetNodeDir(nodeName)
	f.failOnError(err, "network.GetNodeDir failed: %v")
	client, err := libgoal.MakeClientWithBinDir(f.binDir, nodeDir, nodeDir, libgoal.AlgodClient)
	f.failOnError(err, "make libgoal client failed: %v")
	return client
}

// addParticipationForClient records a participation-only key against the
// client's data directory so tests can look it up later.
func (f *LibGoalFixture) addParticipationForClient(lg libgoal.Client, part account.Participation) {
	dir := lg.DataDir()
	f.clientPartKeys[dir] = append(f.clientPartKeys[dir], part)
}

// GetNodeControllerForDataDir returns a NodeController for the specified nodeDataDir.
func (f *LibGoalFixture) GetNodeControllerForDataDir(nodeDataDir string) nodecontrol.NodeController {
	return nodecontrol.MakeNodeController(f.binDir, nodeDataDir)
}

// Start can be called to start the fixture's network if SetupNoStart() was used.
// It starts all nodes, builds the primary LibGoal client, points the node
// controller's kmd data directory at the primary node, and imports the root
// keys found in the primary data directory.
func (f *LibGoalFixture) Start() {
	err := f.network.Start(f.binDir, true)
	f.failOnError(err, "error starting network: %v")

	client, err := libgoal.MakeClientWithBinDir(f.binDir, f.PrimaryDataDir(), f.PrimaryDataDir(), libgoal.FullClient)
	f.failOnError(err, "make libgoal client failed: %v")
	f.LibGoalClient = client
	f.NC = nodecontrol.MakeNodeController(f.binDir, f.network.PrimaryDataDir())
	// The original code silently discarded the filepath.Abs error; surface it
	// so an unusable working directory fails loudly instead of producing an
	// empty kmd path.
	algodKmdPath, err := filepath.Abs(filepath.Join(f.PrimaryDataDir(), libgoal.DefaultKMDDataDir))
	f.failOnError(err, "couldn't compute absolute kmd data dir: %v")
	f.NC.SetKMDDataDir(algodKmdPath)
	f.clientPartKeys = make(map[string][]account.Participation)
	f.importRootKeys(&f.LibGoalClient, f.PrimaryDataDir())
}

// SetTestContext should be called within each test using a shared fixture.
// It ensures the current test context is set and then reset after the test ends
// It should be called in the form of "defer fixture.SetTestContext(t)()"
func (f *LibGoalFixture) SetTestContext(t TestingTB) func() {
	f.tMu.Lock()
	defer f.tMu.Unlock()
	f.t = SynchronizedTest(t)
	// The returned closure clears the test context; callbacks such as
	// nodeExitWithError check for a nil f.t and become no-ops between tests.
	return func() {
		f.tMu.Lock()
		defer f.tMu.Unlock()
		f.t = nil
	}
}

// Run implements the Fixture.Run method, delegating to the base fixture.
func (f *LibGoalFixture) Run(m *testing.M) int {
	return f.run(m)
}

// RunAndExit implements the Fixture.RunAndExit method, delegating to the base fixture.
func (f *LibGoalFixture) RunAndExit(m *testing.M) {
	f.runAndExit(m)
}

// Shutdown implements the Fixture.Shutdown method.
// Logs are preserved (dumped) when the current test has failed.
func (f *LibGoalFixture) Shutdown() {
	// Shutdown() should not be called by shared fixtures (this will panic as f.t should be null)
	f.ShutdownImpl(f.t.Failed())
}

// ShutdownImpl implements the Fixture.ShutdownImpl method.
// When preserveData is true the network is stopped and every node's log is
// dumped for inspection; otherwise the network directory is deleted.
func (f *LibGoalFixture) ShutdownImpl(preserveData bool) {
	f.NC.StopKMD()
	if preserveData {
		f.network.Stop(f.binDir)
		f.dumpLogs(filepath.Join(f.PrimaryDataDir(), "node.log"))
		for _, nodeDir := range f.NodeDataDirs() {
			f.dumpLogs(filepath.Join(nodeDir, "node.log"))
		}
	} else {
		f.network.Delete(f.binDir)

		// Remove the test dir, if it was created by us as a temporary
		// directory and it is empty.  If there's anything still in the
		// test dir, os.Remove()'s rmdir will fail and have no effect;
		// we ignore this error.
		if f.testDirTmp {
			os.Remove(f.testDir)
		}
	}
}

// dumpLogs prints out log files for the running nodes via the test logger.
// Open failures are reported but non-fatal.
func (f *LibGoalFixture) dumpLogs(filePath string) {
	file, err := os.Open(filePath)
	if err != nil {
		f.t.Logf("could not open %s", filePath)
		return
	}
	defer file.Close()

	f.t.Log("=================================\n")
	parts := strings.Split(filePath, "/")
	f.t.Logf("%s/%s:", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Use an explicit %s verb: the original passed the log line as the
		// format string, so any '%' in a log line was misinterpreted as a
		// formatting directive (and go vet flags it).
		f.t.Logf("%s", scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		// e.g. a log line exceeding bufio.Scanner's default token size
		f.t.Logf("error scanning %s: %v", filePath, err)
	}
}

// failOnError intercepts baseFixture.failOnError so we can clean up any algods
// that are still alive before failing the test. No-op when err is nil.
func (f *LibGoalFixture) failOnError(err error, message string) {
	if err != nil {
		f.network.Stop(f.binDir)
		f.baseFixture.failOnError(err, message)
	}
}

// PrimaryDataDir returns the data directory for the PrimaryNode for the network
func (f *LibGoalFixture) PrimaryDataDir() string {
	return f.network.PrimaryDataDir()
}

// NodeDataDirs returns the (non-Primary) data directories for the network
func (f *LibGoalFixture) NodeDataDirs() []string {
	return f.network.NodeDataDirs()
}

// GetNodeDir returns the node directory that is associated with the given node name.
func (f *LibGoalFixture) GetNodeDir(nodeName string) (string, error) {
	return f.network.GetNodeDir(nodeName)
}

// GetNodeController returns the node controller that is associated with the given node name.
func (f *LibGoalFixture) GetNodeController(nodeName string) (nodecontrol.NodeController, error) {
	return f.network.GetNodeController(f.binDir, nodeName)
}

// GetBinDir retrieves the bin directory.
func (f *LibGoalFixture) GetBinDir() string {
	return f.binDir
}

// StartNode can be called to start a node after the network has been started
// (with the correct PeerAddresses for configured relays). On success it
// returns a client bound to the freshly started node.
func (f *LibGoalFixture) StartNode(nodeDir string) (libgoal.Client, error) {
	if err := f.network.StartNode(f.binDir, nodeDir, true); err != nil {
		return libgoal.Client{}, err
	}
	client, err := libgoal.MakeClientWithBinDir(f.binDir, nodeDir, nodeDir, libgoal.DynamicClient)
	if err != nil {
		return libgoal.Client{}, err
	}
	return client, nil
}

// GetParticipationOnlyAccounts returns accounts that only have participation keys,
// as recorded for the given client's data directory during key import.
func (f *LibGoalFixture) GetParticipationOnlyAccounts(lg libgoal.Client) []account.Participation {
	return f.clientPartKeys[lg.DataDir()]
}

// WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the
// globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach.
// It operates on the fixture's primary LibGoalClient.
func (f *LibGoalFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error {
	return f.ClientWaitForRoundWithTimeout(f.LibGoalClient, roundToWaitFor)
}

// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. The implementation
// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're
// getting "hung" before waiting for all the expected rounds to reach.
func (f *LibGoalFixture) ClientWaitForRoundWithTimeout(client libgoal.Client, roundToWaitFor uint64) error {
	status, err := client.Status()
	require.NoError(f.t, err)
	lastRound := status.LastRound

	// If node is already at or past target round, we're done
	if lastRound >= roundToWaitFor {
		return nil
	}

	roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer
	// Buffered so a goroutine that completes after we've timed out can still
	// send without blocking (and without leaking).
	roundComplete := make(chan error, 2)

	for nextRound := lastRound + 1; lastRound < roundToWaitFor; {
		roundStarted := time.Now()

		// Pass the loop-dependent values as arguments so the goroutine never
		// reads variables the loop later mutates.
		go func(done chan error, round uint64, waitTime time.Duration) {
			done <- f.ClientWaitForRound(client, round, waitTime)
		}(roundComplete, nextRound, roundTime)

		select {
		case lastError := <-roundComplete:
			if lastError != nil {
				close(roundComplete)
				return lastError
			}
		case <-time.After(roundTime):
			// We've timed out. (The original declared `time :=` here,
			// shadowing the time package; use time.Since with a fresh name.)
			took := time.Since(roundStarted)
			return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", took.Seconds(), lastRound, nextRound)
		}

		// singleRoundMaxTime is a package-level bound defined elsewhere in this package.
		roundTime = singleRoundMaxTime
		lastRound++
		nextRound++
	}
	return nil
}

// ClientWaitForRound waits up to the specified amount of time for
// the network to reach or pass the specified round, on the specific client/node.
// It polls the node's status every 200ms.
func (f *LibGoalFixture) ClientWaitForRound(client libgoal.Client, round uint64, waitTime time.Duration) error {
	timeout := time.NewTimer(waitTime)
	defer timeout.Stop()
	// Use a single ticker for the poll cadence instead of allocating a fresh
	// timer via time.After on every iteration.
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for {
		status, err := client.Status()
		if err != nil {
			return err
		}
		if status.LastRound >= round {
			return nil
		}
		select {
		case <-timeout.C:
			return fmt.Errorf("timeout waiting for round %v", round)
		case <-ticker.C:
			// poll again
		}
	}
}

// CurrentConsensusParams returns the consensus parameters for the currently active protocol,
// as determined by the primary client's last-seen round.
func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusParams, err error) {
	status, err := f.LibGoalClient.Status()
	if err != nil {
		return
	}

	return f.ConsensusParams(status.LastRound)
}

// ConsensusParams returns the consensus parameters for the protocol from the specified round.
// A fixture-level consensus override (installed via SetConsensus) takes
// precedence over the global config.Consensus table.
func (f *LibGoalFixture) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) {
	block, err := f.LibGoalClient.BookkeepingBlock(round)
	if err != nil {
		return
	}
	version := protocol.ConsensusVersion(block.CurrentProtocol)
	// The original shadowed the named result `consensus` with a `:=` inside
	// this branch; use a distinct name to avoid the shadowing hazard.
	if f.consensus != nil {
		if params, has := f.consensus[version]; has {
			return params, nil
		}
	}
	consensus = config.Consensus[version]
	return
}

// CurrentMinFeeAndBalance returns the MinTxnFee and MinBalance for the currently active protocol
// If MinBalance is 0, we provide a reasonable default of the current consensus version's minBalance,
// to ensure accounts have funds when MinBalance is used to fund new accounts
func (f *LibGoalFixture) CurrentMinFeeAndBalance() (minFee, minBalance uint64, err error) {
	var params config.ConsensusParams
	params, err = f.CurrentConsensusParams()
	if err != nil {
		return
	}
	minFee = params.MinTxnFee
	minBalance = params.MinBalance
	if minBalance == 0 {
		// Fall back to the current consensus version's MinBalance.
		minBalance = config.Consensus[protocol.ConsensusCurrentVersion].MinBalance
	}
	return
}

// MinFeeAndBalance returns the MinTxnFee and MinBalance for the protocol from the specified round
// If MinBalance is 0, we provide a reasonable default of 1000 to ensure accounts have funds when
// MinBalance is used to fund new accounts
func (f *LibGoalFixture) MinFeeAndBalance(round uint64) (minFee, minBalance uint64, err error) {
	var params config.ConsensusParams
	params, err = f.ConsensusParams(round)
	if err != nil {
		return
	}
	minFee = params.MinTxnFee
	minBalance = params.MinBalance
	if minBalance == 0 {
		// Default so callers funding new accounts never compute a zero amount.
		minBalance = 1000
	}
	return
}

// TransactionProof returns a proof for usage in merkle array verification for the provided transaction.
// It fetches the raw proof response from algod and decodes it into a
// merklearray.SingleLeafProof using the hash type reported by the node.
func (f *LibGoalFixture) TransactionProof(txid string, round uint64, hashType crypto.HashType) (model.TransactionProofResponse, merklearray.SingleLeafProof, error) {
	resp, err := f.LibGoalClient.TransactionProof(txid, round, hashType)
	if err != nil {
		return model.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
	}

	leafProof, err := merklearray.ProofDataToSingleLeafProof(string(resp.Hashtype), resp.Proof)
	if err != nil {
		return model.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
	}

	return resp, leafProof, nil
}

// LightBlockHeaderProof returns a proof for usage in merkle array verification for the provided block's light block header.
// The proof is decoded with SHA-256, matching the hash used for light block headers.
func (f *LibGoalFixture) LightBlockHeaderProof(round uint64) (model.LightBlockHeaderProofResponse, merklearray.SingleLeafProof, error) {
	resp, err := f.LibGoalClient.LightBlockHeaderProof(round)
	if err != nil {
		return model.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
	}

	leafProof, err := merklearray.ProofDataToSingleLeafProof(crypto.Sha256.String(), resp.Proof)
	if err != nil {
		return model.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
	}

	return resp, leafProof, nil
}