summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2021-05-24 19:31:05 -0400
committerGitHub <noreply@github.com>2021-05-24 19:31:05 -0400
commit8fe22d4e38a9661a5585abf93fbc7d08fa2c78d2 (patch)
tree54bf5fe10f0529bf3551634ffb9421c6325077cc
parent219b78d98184274b4ddccff47cdd1fa8091c8dac (diff)
parent34f71063e3554c9f568e0b425fa24e513efdd8d3 (diff)
Merge pull request #2179 from Algo-devops-service/relstable2.6.0v2.6.0-stable
go-algorand 2.6.0-stable
-rw-r--r--.travis.yml3
-rw-r--r--Makefile19
-rw-r--r--README.md6
-rw-r--r--agreement/abstractions.go11
-rw-r--r--agreement/agreementtest/keyManager.go19
-rw-r--r--agreement/agreementtest/simulate_test.go3
-rw-r--r--agreement/cryptoVerifier_test.go3
-rw-r--r--agreement/demux.go17
-rw-r--r--agreement/fuzzer/fuzzer_test.go19
-rw-r--r--agreement/fuzzer/keyManager_test.go13
-rw-r--r--agreement/gossip/networkFull_test.go26
-rw-r--r--agreement/pseudonode.go88
-rw-r--r--agreement/pseudonode_test.go78
-rw-r--r--agreement/service_test.go26
-rw-r--r--buildnumber.dat2
-rw-r--r--catchup/catchpointService.go55
-rw-r--r--catchup/fetcher.go310
-rw-r--r--catchup/fetcher_test.go654
-rw-r--r--catchup/httpFetcher.go169
-rw-r--r--catchup/peerSelector.go266
-rw-r--r--catchup/peerSelector_test.go220
-rw-r--r--catchup/pref_test.go36
-rw-r--r--catchup/service.go250
-rw-r--r--catchup/service_test.go345
-rw-r--r--catchup/universalFetcher.go272
-rw-r--r--catchup/universalFetcher_test.go129
-rw-r--r--catchup/wsFetcher.go193
-rw-r--r--cmd/algofix/deadlock.go35
-rw-r--r--cmd/algofix/deadlock_test.go176
-rw-r--r--cmd/algoh/main.go2
-rw-r--r--cmd/algokey/part.go8
-rw-r--r--cmd/catchpointdump/commands.go2
-rw-r--r--cmd/catchpointdump/database.go70
-rw-r--r--cmd/catchpointdump/file.go2
-rw-r--r--cmd/catchpointdump/net.go2
-rw-r--r--cmd/genesis/newgenesis.go8
-rw-r--r--cmd/goal/account.go8
-rw-r--r--cmd/goal/application.go49
-rw-r--r--cmd/goal/asset.go15
-rw-r--r--cmd/goal/clerk.go45
-rw-r--r--cmd/goal/interact.go7
-rw-r--r--cmd/goal/messages.go6
-rw-r--r--cmd/goal/messages_common.go24
-rw-r--r--cmd/goal/messages_windows.go22
-rw-r--r--cmd/goal/node.go21
-rw-r--r--cmd/netgoal/generate.go43
-rw-r--r--cmd/netgoal/network.go18
-rw-r--r--cmd/netgoal/recipe.go1
-rw-r--r--cmd/opdoc/opdoc.go24
-rw-r--r--cmd/pingpong/runCmd.go10
-rw-r--r--cmd/tealdbg/cdtState.go14
-rw-r--r--cmd/tealdbg/dryrunRequest.go10
-rw-r--r--cmd/tealdbg/local.go43
-rw-r--r--cmd/tealdbg/local_test.go8
-rw-r--r--cmd/tealdbg/main.go8
-rw-r--r--cmd/tealdbg/server.go4
-rw-r--r--cmd/tealdbg/server_test.go2
-rwxr-xr-xcmd/updater/systemd-setup.sh2
-rwxr-xr-xcmd/updater/update.sh60
-rw-r--r--compactcert/abstractions.go2
-rw-r--r--compactcert/builder.go3
-rw-r--r--compactcert/signer.go12
-rw-r--r--compactcert/worker_test.go12
-rw-r--r--config/config.go75
-rw-r--r--config/consensus.go14
-rw-r--r--config/local_defaults.go10
-rw-r--r--config/version.go2
-rw-r--r--crypto/merkletrie/cache_test.go2
-rw-r--r--crypto/merkletrie/committer_test.go68
-rw-r--r--crypto/merkletrie/node.go12
-rw-r--r--crypto/merkletrie/trie.go8
-rw-r--r--crypto/merkletrie/trie_test.go18
-rw-r--r--daemon/algod/api/server/v1/handlers/responses.go2
-rw-r--r--daemon/algod/api/server/v2/dryrun_test.go5
-rw-r--r--daemon/algod/api/server/v2/handlers.go22
-rw-r--r--daemon/algod/api/server/v2/test/handlers_test.go34
-rw-r--r--daemon/algod/api/server/v2/test/helpers.go6
-rw-r--r--daemon/algod/deadlockLogger.go81
-rw-r--r--daemon/algod/deadlock_test.go37
-rw-r--r--daemon/kmd/lib/kmdapi/requests.go4
-rw-r--r--daemon/kmd/lib/kmdapi/responses.go155
-rw-r--r--data/account/account.go17
-rw-r--r--data/account/participation.go117
-rw-r--r--data/account/participation_test.go31
-rw-r--r--data/accountManager.go47
-rw-r--r--data/common_test.go4
-rw-r--r--data/ledger.go6
-rw-r--r--data/pools/transactionPool.go7
-rw-r--r--data/transactions/logic/.gitignore2
-rw-r--r--data/transactions/logic/README.md17
-rw-r--r--data/transactions/logic/README_in.md14
-rw-r--r--data/transactions/logic/TEAL_opcodes.md51
-rw-r--r--data/transactions/logic/assembler.go690
-rw-r--r--data/transactions/logic/assembler_test.go352
-rw-r--r--data/transactions/logic/backwardCompat_test.go70
-rw-r--r--data/transactions/logic/doc.go562
-rw-r--r--data/transactions/logic/doc_test.go29
-rw-r--r--data/transactions/logic/eval.go314
-rw-r--r--data/transactions/logic/evalStateful_test.go86
-rw-r--r--data/transactions/logic/eval_test.go509
-rw-r--r--data/transactions/logic/fields.go2
-rw-r--r--data/transactions/logic/opcodes.go42
-rw-r--r--data/transactions/logic/opcodes_test.go4
-rw-r--r--data/transactions/signedtxn.go10
-rw-r--r--data/transactions/transaction.go43
-rw-r--r--data/transactions/transaction_test.go645
-rw-r--r--data/transactions/verify/txn.go5
-rw-r--r--debug/logfilter/example1.in31
-rw-r--r--debug/logfilter/example1.out.expected2
-rw-r--r--debug/logfilter/example2.in98
-rw-r--r--debug/logfilter/example2.out.expected22
-rw-r--r--debug/logfilter/example3.in417
-rw-r--r--debug/logfilter/example3.out.expected206
-rw-r--r--debug/logfilter/example4.in21
-rw-r--r--debug/logfilter/example4.out.expected2
-rw-r--r--debug/logfilter/example5.in128
-rw-r--r--debug/logfilter/example5.out.expected33
-rw-r--r--debug/logfilter/example6.in2619
-rw-r--r--debug/logfilter/example6.out.expected385
-rw-r--r--debug/logfilter/example7.in63
-rw-r--r--debug/logfilter/example7.out.expected64
-rw-r--r--debug/logfilter/example8.in47
-rw-r--r--debug/logfilter/example8.out.expected30
-rw-r--r--debug/logfilter/main.go148
-rw-r--r--debug/logfilter/main_test.go63
-rw-r--r--docker/build/Dockerfile2
-rw-r--r--docker/build/Dockerfile-deploy2
-rw-r--r--docker/build/cicd.alpine.Dockerfile2
-rw-r--r--docker/build/cicd.centos.Dockerfile2
-rw-r--r--docker/build/cicd.ubuntu.Dockerfile2
-rw-r--r--gen/generate.go107
-rw-r--r--gen/generate_test.go18
-rw-r--r--installer/config.json.example10
-rw-r--r--ledger/README.md3
-rw-r--r--ledger/accountdb.go24
-rw-r--r--ledger/accountdb_test.go4
-rw-r--r--ledger/acctupdates.go322
-rw-r--r--ledger/acctupdates_test.go387
-rw-r--r--ledger/appcow.go2
-rw-r--r--ledger/appcow_test.go2
-rw-r--r--ledger/applications.go3
-rw-r--r--ledger/applications_test.go10
-rw-r--r--ledger/apply/application.go17
-rw-r--r--ledger/apply/application_test.go24
-rw-r--r--ledger/apply/keyreg.go15
-rw-r--r--ledger/apply/keyreg_test.go54
-rw-r--r--ledger/blockqueue.go4
-rw-r--r--ledger/catchupaccessor.go16
-rw-r--r--ledger/catchupaccessor_test.go2
-rw-r--r--ledger/compactcert.go12
-rw-r--r--ledger/compactcert_test.go168
-rw-r--r--ledger/cow.go4
-rw-r--r--ledger/cow_test.go11
-rw-r--r--ledger/eval.go367
-rw-r--r--ledger/eval_test.go285
-rw-r--r--ledger/ledger.go11
-rw-r--r--ledger/ledger_perf_test.go2
-rw-r--r--ledger/ledger_test.go187
-rw-r--r--ledger/ledgercore/statedelta.go3
-rw-r--r--ledger/ledgercore/statedelta_test.go2
-rw-r--r--ledger/perf_test.go9
-rw-r--r--ledger/txtail_test.go2
-rw-r--r--libgoal/participation.go35
-rw-r--r--libgoal/transactions.go2
-rw-r--r--logging/telemetryspec/metric.go22
-rw-r--r--netdeploy/network.go9
-rw-r--r--netdeploy/networkTemplate.go37
-rw-r--r--netdeploy/remote/bootstrappedNetwork.go45
-rw-r--r--netdeploy/remote/bootstrappedNetwork_test.go49
-rw-r--r--netdeploy/remote/deployedNetwork.go484
-rw-r--r--netdeploy/remote/deployedNetwork_test.go121
-rw-r--r--netdeploy/remote/nodecfg/nodeConfigurator.go44
-rw-r--r--network/connPerfMon.go2
-rw-r--r--network/ping.go3
-rw-r--r--network/requestTracker.go20
-rw-r--r--network/requestTracker_test.go31
-rw-r--r--network/wsNetwork.go103
-rw-r--r--network/wsNetwork_test.go168
-rw-r--r--network/wsPeer.go143
-rw-r--r--network/wsPeer_test.go11
-rw-r--r--node/netprio.go7
-rw-r--r--node/node.go78
-rw-r--r--nodecontrol/NodeController.go13
-rw-r--r--nodecontrol/algodControl.go12
-rw-r--r--nodecontrol/kmdControl.go15
-rw-r--r--nodecontrol/kmdControl_common.go33
-rw-r--r--nodecontrol/kmdControl_windows.go25
-rw-r--r--rpcs/blockService.go129
-rw-r--r--rpcs/blockService_test.go208
-rw-r--r--rpcs/txService_test.go5
-rw-r--r--rpcs/txSyncer_test.go4
-rwxr-xr-xscripts/configure_dev.sh9
-rwxr-xr-xscripts/create_and_deploy_recipe.sh6
-rwxr-xr-xscripts/install_linux_deps.sh15
-rwxr-xr-xscripts/release/mule/deploy/docker/docker.sh3
-rwxr-xr-xscripts/release/mule/deploy/releases_page/generate_releases_page.py120
-rwxr-xr-xscripts/release/mule/package/deb/package.sh2
-rw-r--r--shared/pingpong/accounts.go6
-rw-r--r--shared/pingpong/config.go8
-rw-r--r--shared/pingpong/pingpong.go257
-rw-r--r--test/README.md6
-rw-r--r--test/commandandcontrol/cc_agent/component/agent.go2
-rw-r--r--test/commandandcontrol/cc_agent/component/pingPongComponent.go8
-rw-r--r--test/commandandcontrol/cc_service/main.go4
-rw-r--r--test/e2e-go/cli/algod/cleanup_test.go2
-rw-r--r--test/e2e-go/cli/algod/stdstreams_test.go2
-rw-r--r--test/e2e-go/cli/goal/account_test.go8
-rw-r--r--test/e2e-go/cli/goal/clerk_test.go4
-rw-r--r--test/e2e-go/cli/goal/expect/README.md2
-rw-r--r--test/e2e-go/cli/goal/expect/goalAccountTest.exp27
-rw-r--r--test/e2e-go/cli/goal/expect/goalExpectCommon.exp45
-rw-r--r--test/e2e-go/cli/goal/expect/goalNodeTest.exp17
-rw-r--r--test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp72
-rwxr-xr-xtest/e2e-go/cli/goal/expect/tealConsensusTest.exp107
-rw-r--r--test/e2e-go/cli/goal/node_cleanup_test.go3
-rw-r--r--test/e2e-go/cli/perf/libgoal_test.go7
-rw-r--r--test/e2e-go/cli/perf/payment_test.go18
-rw-r--r--test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp2
-rw-r--r--test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp96
-rw-r--r--test/e2e-go/features/auction/auctionCancel_test.go6
-rw-r--r--test/e2e-go/features/auction/auctionErrors_test.go10
-rw-r--r--test/e2e-go/features/auction/basicAuction_test.go14
-rw-r--r--test/e2e-go/features/catchup/basicCatchup_test.go21
-rw-r--r--test/e2e-go/features/catchup/catchpointCatchup_test.go6
-rw-r--r--test/e2e-go/features/compactcert/compactcert_test.go31
-rw-r--r--test/e2e-go/features/multisig/multisig_test.go8
-rw-r--r--test/e2e-go/features/participation/onlineOfflineParticipation_test.go4
-rw-r--r--test/e2e-go/features/participation/overlappingParticipationKeys_test.go209
-rw-r--r--test/e2e-go/features/participation/participationRewards_test.go18
-rw-r--r--test/e2e-go/features/partitionRecovery/partitionRecovery_test.go8
-rw-r--r--test/e2e-go/features/teal/compile_test.go2
-rw-r--r--test/e2e-go/features/transactions/accountv2_test.go17
-rw-r--r--test/e2e-go/features/transactions/asset_test.go12
-rw-r--r--test/e2e-go/features/transactions/close_account_test.go2
-rw-r--r--test/e2e-go/features/transactions/group_test.go6
-rw-r--r--test/e2e-go/features/transactions/lease_test.go12
-rw-r--r--test/e2e-go/features/transactions/onlineStatusChange_test.go2
-rw-r--r--test/e2e-go/features/transactions/proof_test.go2
-rw-r--r--test/e2e-go/features/transactions/sendReceive_test.go2
-rw-r--r--test/e2e-go/features/transactions/transactionPool_test.go6
-rw-r--r--test/e2e-go/kmd/e2e_kmd_server_client_test.go11
-rw-r--r--test/e2e-go/kmd/e2e_kmd_sqlite_test.go8
-rw-r--r--test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go181
-rw-r--r--test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go101
-rw-r--r--test/e2e-go/kmd/e2e_kmd_wallet_test.go78
-rw-r--r--test/e2e-go/perf/basic_test.go26
-rw-r--r--test/e2e-go/restAPI/restClient_test.go330
-rw-r--r--test/e2e-go/stress/transactions/createManyAndGoOnline_test.go2
-rw-r--r--test/e2e-go/upgrades/application_support_test.go326
-rw-r--r--test/e2e-go/upgrades/rekey_support_test.go16
-rw-r--r--test/e2e-go/upgrades/send_receive_upgrade_test.go2
-rw-r--r--test/framework/fixtures/auctionFixture.go5
-rw-r--r--test/framework/fixtures/expectFixture.go22
-rw-r--r--test/framework/fixtures/fixture.go133
-rw-r--r--test/framework/fixtures/kmdFixture.go28
-rw-r--r--test/framework/fixtures/libgoalFixture.go20
-rw-r--r--test/framework/fixtures/restClientFixture.go4
-rw-r--r--test/heapwatch/README.md43
-rw-r--r--test/heapwatch/bwstart.sh44
-rw-r--r--test/heapwatch/heapWatch.py263
-rw-r--r--test/heapwatch/metrics_delta.py140
-rwxr-xr-xtest/heapwatch/start.sh34
-rwxr-xr-xtest/heapwatch/stop.sh33
-rwxr-xr-xtest/scripts/e2e.sh21
-rwxr-xr-xtest/scripts/e2e_client_runner.py44
-rwxr-xr-xtest/scripts/e2e_go_tests.sh11
-rwxr-xr-xtest/scripts/e2e_subs/rest-applications-endpoint.sh35
-rwxr-xr-xtest/scripts/e2e_subs/rest-assets-endpoint.sh27
-rwxr-xr-xtest/scripts/e2e_subs/rest-genesis-endpoint.sh15
-rwxr-xr-xtest/scripts/e2e_subs/rest-pprof.sh32
-rwxr-xr-xtest/scripts/e2e_subs/rest.sh107
-rwxr-xr-xtest/scripts/e2e_subs/serial/rest-proof-endpoint.sh25
-rwxr-xr-xtest/scripts/e2e_subs/v26/teal-v3-only.sh99
-rw-r--r--test/testdata/configs/config-v16.json92
-rw-r--r--test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json8
-rw-r--r--test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json8
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile16
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json8
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py27
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json1013
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json2564
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json22
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json5
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json8
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json156
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1/node.json4
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1/relay.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario2/node.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario2/relay.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3/node.json4
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3/relay.json2
-rw-r--r--test/testdata/nettemplates/ShortParticipationKeys.json55
-rw-r--r--test/testdata/nettemplates/SingleNodeNetwork.json33
-rw-r--r--test/testdata/nettemplates/TwoNodes50EachV26.json29
-rw-r--r--tools/network/telemetryURIUpdateService.go2
-rw-r--r--util/bloom/bloom.go43
-rw-r--r--util/bloom/bloom_test.go120
-rw-r--r--util/db/dbutil.go6
-rw-r--r--util/db/perf_test.go1
-rw-r--r--util/metrics/counter_test.go6
-rw-r--r--util/metrics/gauge_test.go2
-rw-r--r--util/metrics/segment_test.go2
-rw-r--r--util/metrics/tagcounter.go151
-rw-r--r--util/metrics/tagcounter_test.go146
-rw-r--r--util/process_common.go6
-rw-r--r--util/process_windows.go164
307 files changed, 20227 insertions, 4590 deletions
diff --git a/.travis.yml b/.travis.yml
index 691285895..d4da7339b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -165,6 +165,7 @@ jobs:
cache:
directories:
- crypto/lib
+ - $HOME/docker_cache
before_install:
- |-
@@ -189,6 +190,7 @@ before_install:
export MAKE=mingw32-make # so that Autotools can find it
;;
esac
+ docker load -i $HOME/docker_cache/images.tar || true
before_cache:
- |-
@@ -198,6 +200,7 @@ before_cache:
$msys2 pacman --sync --clean --noconfirm
;;
esac
+ docker save -o $HOME/docker_cache/images.tar $(docker images -a -q)
addons:
apt:
diff --git a/Makefile b/Makefile
index 7bfbebc2e..b10d47ca9 100644
--- a/Makefile
+++ b/Makefile
@@ -107,7 +107,18 @@ generate: deps
msgp: $(patsubst %,%/msgp_gen.go,$(MSGP_GENERATE))
%/msgp_gen.go: deps ALWAYS
- $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand
+ @set +e; \
+ printf "msgp: $(@D)..."; \
+ $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand > ./$@.out 2>&1; \
+ if [ "$$?" != "0" ]; then \
+ printf "failed:\n$(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand\n"; \
+ cat ./$@.out; \
+ rm ./$@.out; \
+ exit 1; \
+ else \
+ echo " done."; \
+ fi; \
+ rm -f ./$@.out
ALWAYS:
# build our fork of libsodium, placing artifacts into crypto/lib/ and crypto/include/
@@ -210,17 +221,17 @@ $(GOPATH1)/bin/%:
cp -f $< $@
test: build
- go test $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 3600s
+ go test $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 3600s | logfilter
fulltest: build-race
for PACKAGE_DIRECTORY in $(UNIT_TEST_SOURCES) ; do \
- go test $(GOTAGS) -timeout 2500s -race $$PACKAGE_DIRECTORY; \
+ go test $(GOTAGS) -timeout 2500s -race $$PACKAGE_DIRECTORY | logfilter; \
done
shorttest: build-race $(addprefix short_test_target_, $(UNIT_TEST_SOURCES))
$(addprefix short_test_target_, $(UNIT_TEST_SOURCES)): build
- @go test $(GOTAGS) -short -timeout 2500s -race $(subst short_test_target_,,$@)
+ @go test $(GOTAGS) -short -timeout 2500s -race $(subst short_test_target_,,$@) | logfilter
integration: build-race
./test/scripts/run_integration_tests.sh
diff --git a/README.md b/README.md
index 51a659c6d..bfca90355 100644
--- a/README.md
+++ b/README.md
@@ -22,8 +22,10 @@ the [official Go documentation website](https://golang.org/doc/).
### Linux / OSX ###
We currently strive to support Debian based distributions with Ubuntu 18.04
-being our official release target. Our core engineering team uses Linux and OSX,
-so both environments are well supported for development.
+being our official release target.
+Building on Arch Linux works as well.
+Our core engineering team uses Linux and OSX, so both environments are well
+supported for development.
OSX only: [Homebrew (brew)](https://brew.sh) must be installed before
continuing. [Here](https://docs.brew.sh/Installation) are the installation
diff --git a/agreement/abstractions.go b/agreement/abstractions.go
index ee0cf43b6..2384ef50e 100644
--- a/agreement/abstractions.go
+++ b/agreement/abstractions.go
@@ -225,13 +225,10 @@ type LedgerWriter interface {
// A KeyManager stores and deletes participation keys.
type KeyManager interface {
- // Keys returns an immutable array of participation intervals to
- // participating accounts.
- Keys() []account.Participation
-
- // HasLiveKeys returns true if we have any Participation
- // keys valid for the specified round range (inclusive)
- HasLiveKeys(from, to basics.Round) bool
+ // VotingKeys returns an immutable array of voting keys that are
+ // valid for the provided votingRound, and were available at
+ // keysRound.
+ VotingKeys(votingRound, keysRound basics.Round) []account.Participation
}
// MessageHandle is an ID referring to a specific message.
diff --git a/agreement/agreementtest/keyManager.go b/agreement/agreementtest/keyManager.go
index 6b3b968f6..384fba8cd 100644
--- a/agreement/agreementtest/keyManager.go
+++ b/agreement/agreementtest/keyManager.go
@@ -24,24 +24,15 @@ import (
// SimpleKeyManager provides a simple implementation of a KeyManager.
type SimpleKeyManager []account.Participation
-// Keys implements KeyManager.Keys.
-func (m SimpleKeyManager) Keys() []account.Participation {
+// VotingKeys implements KeyManager.VotingKeys.
+func (m SimpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
var km []account.Participation
for _, acc := range m {
- km = append(km, acc)
- }
- return km
-}
-
-// HasLiveKeys returns true if we have any Participation
-// keys valid for the specified round range (inclusive)
-func (m SimpleKeyManager) HasLiveKeys(from, to basics.Round) bool {
- for _, acc := range m {
- if acc.OverlapsInterval(from, to) {
- return true
+ if acc.OverlapsInterval(votingRound, votingRound) {
+ km = append(km, acc)
}
}
- return false
+ return km
}
// DeleteOldKeys implements KeyManager.DeleteOldKeys.
diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go
index 3017a117a..c4ccb69b1 100644
--- a/agreement/agreementtest/simulate_test.go
+++ b/agreement/agreementtest/simulate_test.go
@@ -356,7 +356,8 @@ func generateNAccounts(t *testing.T, N int, firstRound, lastRound basics.Round,
if err != nil {
panic(err)
}
- accounts = append(accounts, part)
+ accounts = append(accounts, part.Participation)
+ part.Close()
}
return
}
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index 124452454..4f9e1edb9 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -317,7 +317,8 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
}
Period := period(0)
- participation := pn.getParticipations("BenchmarkCryptoVerifierProposalVertification", ledger.NextRound())
+ pn.loadRoundParticipationKeys(ledger.NextRound())
+ participation := pn.participationKeys
proposals, _ := pn.makeProposals(ledger.NextRound(), Period, participation)
diff --git a/agreement/demux.go b/agreement/demux.go
index 970a3a4c1..a65e5552c 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -238,12 +238,19 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat
deadlineCh := s.Clock.TimeoutAt(deadline)
var fastDeadlineCh <-chan time.Time
- proto, err := d.ledger.ConsensusVersion(ParamsRound(currentRound))
- if err == nil && config.Consensus[proto].FastPartitionRecovery {
- fastDeadlineCh = s.Clock.TimeoutAt(fastDeadline)
+ fastPartitionRecoveryEnabled := false
+ if proto, err := d.ledger.ConsensusVersion(ParamsRound(currentRound)); err != nil {
+ logging.Base().Warnf("demux: could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err)
+ // this might happen during catchup, since the Ledger.Wait fires as soon as a new block is received by the ledger, which could be
+ // far before it's being committed. In these cases, it should be safe to default to the current consensus version. On subsequent
+ // iterations, it will get "corrected" since the ledger would finish flushing the blocks to disk.
+ fastPartitionRecoveryEnabled = config.Consensus[protocol.ConsensusCurrentVersion].FastPartitionRecovery
+ } else {
+ fastPartitionRecoveryEnabled = config.Consensus[proto].FastPartitionRecovery
}
- if err != nil {
- logging.Base().Errorf("could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err)
+
+ if fastPartitionRecoveryEnabled {
+ fastDeadlineCh = s.Clock.TimeoutAt(fastDeadline)
}
d.UpdateEventsQueue(eventQueueDemux, 0)
diff --git a/agreement/fuzzer/fuzzer_test.go b/agreement/fuzzer/fuzzer_test.go
index 31702ca32..bd3b6bb14 100644
--- a/agreement/fuzzer/fuzzer_test.go
+++ b/agreement/fuzzer/fuzzer_test.go
@@ -84,7 +84,7 @@ func MakeFuzzer(config FuzzerConfig) *Fuzzer {
crashAccessors: make([]db.Accessor, config.NodesCount),
accounts: make([]account.Participation, config.NodesCount),
balances: make(map[basics.Address]basics.AccountData),
- accountAccessors: make([]db.Accessor, config.NodesCount*2),
+ accountAccessors: make([]db.Accessor, config.NodesCount),
ledgers: make([]*testLedger, config.NodesCount),
agreementParams: make([]agreement.Parameters, config.NodesCount),
tickGranularity: time.Millisecond * 300,
@@ -196,7 +196,7 @@ func (n *Fuzzer) initAccountsAndBalances(rootSeed []byte, onlineNodes []bool) er
if err != nil {
return err
}
- n.accountAccessors[i*2+0] = rootAccess
+ n.accountAccessors[i] = rootAccess
seed = sha256.Sum256(seed[:])
root, err := account.ImportRoot(rootAccess, seed)
@@ -205,27 +205,12 @@ func (n *Fuzzer) initAccountsAndBalances(rootSeed []byte, onlineNodes []bool) er
}
rootAddress := root.Address()
- partAccess, err := db.MakeAccessor(n.networkName+"part"+strconv.Itoa(i+off), false, true)
-
- if err != nil {
- return err
- }
-
- n.accountAccessors[i*2+1] = partAccess
-
n.accounts[i] = account.Participation{
Parent: rootAddress,
VRF: generatePseudoRandomVRF(i),
Voting: readOnlyParticipationVotes[i],
FirstValid: firstValid,
LastValid: lastValid,
- Store: partAccess,
- }
-
- err = n.accounts[i].Persist()
-
- if err != nil {
- panic(err)
}
acctData := basics.AccountData{
diff --git a/agreement/fuzzer/keyManager_test.go b/agreement/fuzzer/keyManager_test.go
index 1686552c0..c888b4955 100644
--- a/agreement/fuzzer/keyManager_test.go
+++ b/agreement/fuzzer/keyManager_test.go
@@ -23,15 +23,12 @@ import (
type simpleKeyManager []account.Participation
-func (m simpleKeyManager) Keys() []account.Participation {
- return m
-}
-
-func (m simpleKeyManager) HasLiveKeys(from, to basics.Round) bool {
+func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
+ var km []account.Participation
for _, acc := range m {
- if acc.OverlapsInterval(from, to) {
- return true
+ if acc.OverlapsInterval(votingRound, votingRound) {
+ km = append(km, acc)
}
}
- return false
+ return km
}
diff --git a/agreement/gossip/networkFull_test.go b/agreement/gossip/networkFull_test.go
index 2a2a0a84b..54b7899b6 100644
--- a/agreement/gossip/networkFull_test.go
+++ b/agreement/gossip/networkFull_test.go
@@ -202,12 +202,12 @@ func testNetworkImplMixed(t *testing.T, nodesCount int) {
nets, counters := spinNetwork(t, nodesCount)
defer shutdownNetwork(nets, counters)
- nets[0].broadcastTimeout(protocol.AgreementVoteTag, []byte{1}, testNetTimeout)
- nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{1}, testNetTimeout)
- nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{1}, testNetTimeout)
- nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{1}, testNetTimeout)
- nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{1}, testNetTimeout)
- nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{1}, testNetTimeout)
+ nets[0].Broadcast(protocol.AgreementVoteTag, []byte{1})
+ nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{1})
+ nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{1})
+ nets[0].Broadcast(protocol.VoteBundleTag, []byte{1})
+ nets[0].Broadcast(protocol.VoteBundleTag, []byte{1})
+ nets[0].Broadcast(protocol.VoteBundleTag, []byte{1})
for i, counter := range counters {
if i != 0 {
if !counter.verify(t, 1, 2, 3) {
@@ -228,14 +228,14 @@ func testNetworkImplMixed2(t *testing.T, nodesCount int) {
const loadSize = 12
for i := byte(0); i < loadSize; i++ {
- ok := nets[0].broadcastTimeout(protocol.AgreementVoteTag, []byte{i}, testNetTimeout)
+ ok := nets[0].Broadcast(protocol.AgreementVoteTag, []byte{i})
assert.NoError(t, ok)
if i%2 == 0 {
- ok = nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{i}, testNetTimeout)
+ ok = nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{i})
assert.NoError(t, ok)
}
if i%4 == 0 {
- ok = nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{i}, testNetTimeout)
+ ok = nets[0].Broadcast(protocol.VoteBundleTag, []byte{i})
assert.NoError(t, ok)
}
}
@@ -266,14 +266,14 @@ func testNetworkImplReordered(t *testing.T, nodesCount int) {
wg.Add(loadSize)
for i := byte(0); i < loadSize; i++ {
go func(i byte) {
- ok := nets[0].broadcastTimeout(protocol.AgreementVoteTag, []byte{i}, testNetTimeout)
+ ok := nets[0].Broadcast(protocol.AgreementVoteTag, []byte{i})
assert.NoError(t, ok)
if i%2 == 0 {
- ok = nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{i}, testNetTimeout)
+ ok = nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{i})
assert.NoError(t, ok)
}
if i%4 == 0 {
- ok = nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{i}, testNetTimeout)
+ ok = nets[0].Broadcast(protocol.VoteBundleTag, []byte{i})
assert.NoError(t, ok)
}
wg.Done()
@@ -323,7 +323,7 @@ func testNetworkImplRebroadcast(t *testing.T, nodesCount int) {
rebroadcastNodes = 3
}
for i := byte(0); i < byte(rebroadcastNodes); i++ {
- ok := nets[i].broadcastTimeout(protocol.AgreementVoteTag, []byte{i, i + 1}, testNetTimeout)
+ ok := nets[i].Broadcast(protocol.AgreementVoteTag, []byte{i, i + 1})
assert.NoError(t, ok)
}
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index 66cc2ae55..e2809fb13 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -67,14 +67,16 @@ type pseudonode interface {
// asyncPseudonode creates proposals and votes asynchronously.
type asyncPseudonode struct {
- factory BlockFactory
- validator BlockValidator
- keys KeyManager
- ledger Ledger
- log serviceLogger
- quit chan struct{} // a quit signal for the verifier goroutines
- closeWg *sync.WaitGroup // frontend waitgroup to get notified when all the verifier goroutines are done.
- monitor *coserviceMonitor
+ factory BlockFactory
+ validator BlockValidator
+ keys KeyManager
+ ledger Ledger
+ log serviceLogger
+ quit chan struct{} // a quit signal for the verifier goroutines
+ closeWg *sync.WaitGroup // frontend waitgroup to get notified when all the verifier goroutines are done.
+ monitor *coserviceMonitor
+ participationKeysRound basics.Round // the round to which the participationKeys matches
+ participationKeys []account.Participation // the list of the participation keys for round participationKeysRound
proposalsVerifier *pseudonodeVerifier // dynamically generated verifier goroutine that manages incoming proposals making request.
votesVerifier *pseudonodeVerifier // dynamically generated verifier goroutine that manages incoming votes making request.
@@ -193,34 +195,53 @@ func (n asyncPseudonode) MakeVotes(ctx context.Context, r round, p period, s ste
}
}
-func (n asyncPseudonode) makeProposalsTask(ctx context.Context, r round, p period) pseudonodeProposalsTask {
- participation := n.getParticipations("asyncPseudonode.makeProposalsTask", r)
+// load the participation keys from the account manager ( as needed ) for the
+// current round.
+func (n *asyncPseudonode) loadRoundParticipationKeys(voteRound basics.Round) []account.Participation {
+ // if we've already loaded up the keys, then just skip loading them.
+ if n.participationKeysRound == voteRound {
+ return n.participationKeys
+ }
+ cparams, err := n.ledger.ConsensusParams(ParamsRound(voteRound))
+ if err != nil {
+ // if we cannot figure out the balance round number, reset the parameters so that we won't be sending
+ // any vote.
+ n.log.Warnf("asyncPseudonode: unable to retrieve consensus parameters for voting round %d : %v", voteRound, err)
+ n.participationKeysRound = basics.Round(0)
+ n.participationKeys = nil
+ return nil
+ }
+ balanceRound := balanceRound(voteRound, cparams)
+
+ // otherwise, we want to load the participation keys.
+ n.participationKeys = n.keys.VotingKeys(voteRound, balanceRound)
+ n.participationKeysRound = voteRound
+ return n.participationKeys
+}
+
+func (n asyncPseudonode) makeProposalsTask(ctx context.Context, r round, p period) pseudonodeProposalsTask {
pt := pseudonodeProposalsTask{
pseudonodeBaseTask: pseudonodeBaseTask{
- node: &n,
- context: ctx,
- participation: participation,
- out: make(chan externalEvent),
+ node: &n,
+ context: ctx,
+ out: make(chan externalEvent),
},
round: r,
period: p,
}
- if len(participation) == 0 {
+ if !pt.populateParticipationKeys(r) {
close(pt.out)
}
return pt
}
func (n asyncPseudonode) makeVotesTask(ctx context.Context, r round, p period, s step, prop proposalValue, persistStateDone chan error) pseudonodeVotesTask {
- participation := n.getParticipations("asyncPseudonode.makeVotesTask", r)
-
pvt := pseudonodeVotesTask{
pseudonodeBaseTask: pseudonodeBaseTask{
- node: &n,
- context: ctx,
- participation: participation,
- out: make(chan externalEvent),
+ node: &n,
+ context: ctx,
+ out: make(chan externalEvent),
},
round: r,
period: p,
@@ -228,7 +249,7 @@ func (n asyncPseudonode) makeVotesTask(ctx context.Context, r round, p period, s
prop: prop,
persistStateDone: persistStateDone,
}
- if len(participation) == 0 {
+ if !pvt.populateParticipationKeys(r) {
close(pvt.out)
}
return pvt
@@ -244,21 +265,6 @@ func (n asyncPseudonode) makePseudonodeVerifier(voteVerifier *AsyncVoteVerifier)
return pv
}
-// getParticipations retrieves the participation accounts for a given round.
-func (n asyncPseudonode) getParticipations(procName string, round basics.Round) []account.Participation {
- keys := n.keys.Keys()
- participations := make([]account.Participation, 0, len(keys))
- for _, part := range keys {
- firstValid, lastValid := part.ValidInterval()
- if round < firstValid || round > lastValid {
- n.log.Debugf("%v (round=%v): Account %v not participating: %v not in [%v, %v]", procName, round, part.Address(), round, firstValid, lastValid)
- continue
- }
- participations = append(participations, part)
- }
- return participations
-}
-
// makeProposals creates a slice of block proposals for the given round and period.
func (n asyncPseudonode) makeProposals(round basics.Round, period period, accounts []account.Participation) ([]proposal, []unauthenticatedVote) {
deadline := time.Now().Add(config.ProposalAssemblyTime)
@@ -334,6 +340,14 @@ func (pv *pseudonodeVerifier) verifierLoop(n *asyncPseudonode) {
}
}
+// populateParticipationKeys refreshes the participation key cache (as needed), and updates the
+// task with the loaded participation keys. It returns whether we have any participation keys
+// for the given round.
+func (t *pseudonodeBaseTask) populateParticipationKeys(r round) bool {
+ t.participation = t.node.loadRoundParticipationKeys(r)
+ return len(t.participation) > 0
+}
+
func (t pseudonodeBaseTask) outputChannel() chan externalEvent {
return t.out
}
diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go
index b74b83fd3..ca6b11c42 100644
--- a/agreement/pseudonode_test.go
+++ b/agreement/pseudonode_test.go
@@ -23,7 +23,10 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -131,14 +134,13 @@ func compareEventChannels(t *testing.T, ch1, ch2 <-chan externalEvent) bool {
func TestPseudonode(t *testing.T) {
t.Parallel()
- logging.Base().SetLevel(logging.Warn)
-
// generate a nice, fixed hash.
rootSeed := sha256.Sum256([]byte(t.Name()))
accounts, balances := createTestAccountsAndBalances(t, 10, rootSeed[:])
ledger := makeTestLedger(balances)
- sLogger := serviceLogger{logging.Base()}
+ sLogger := serviceLogger{logging.NewLogger()}
+ sLogger.SetLevel(logging.Warn)
keyManager := simpleKeyManager(accounts)
pb := makePseudonode(pseudonodeParams{
@@ -281,7 +283,8 @@ func (n serializedPseudonode) MakeProposals(ctx context.Context, r round, p peri
verifier := makeCryptoVerifier(n.ledger, n.validator, MakeAsyncVoteVerifier(nil), n.log)
defer verifier.Quit()
- participation := n.getParticipations("serializedPseudonode.MakeProposals", r)
+ n.loadRoundParticipationKeys(n.ledger.NextRound())
+ participation := n.participationKeys
proposals, votes := n.makeProposals(r, p, participation)
@@ -337,7 +340,8 @@ func (n serializedPseudonode) MakeVotes(ctx context.Context, r round, p period,
verifier := makeCryptoVerifier(n.ledger, n.validator, MakeAsyncVoteVerifier(nil), n.log)
defer verifier.Quit()
- participation := n.getParticipations("serializedPseudonode.MakeVotes", r)
+ n.loadRoundParticipationKeys(r)
+ participation := n.participationKeys
votes := n.makeVotes(r, p, s, prop, participation)
@@ -374,3 +378,67 @@ func (n serializedPseudonode) MakeVotes(ctx context.Context, r round, p period,
func (n serializedPseudonode) Quit() {
// nothing to do ! this serializedPseudonode is so simplified that no destructor is needed.
}
+
+type KeyManagerProxy struct {
+ target func(basics.Round, basics.Round) []account.Participation
+}
+
+func (k *KeyManagerProxy) VotingKeys(votingRound, balanceRound basics.Round) []account.Participation {
+ return k.target(votingRound, balanceRound)
+}
+
+func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
+ t.Parallel()
+
+ // generate a nice, fixed hash.
+ rootSeed := sha256.Sum256([]byte(t.Name()))
+ accounts, balances := createTestAccountsAndBalances(t, 10, rootSeed[:])
+ ledger := makeTestLedger(balances)
+
+ sLogger := serviceLogger{logging.NewLogger()}
+ sLogger.SetLevel(logging.Warn)
+
+ keyManager := simpleKeyManager(accounts)
+ pb := makePseudonode(pseudonodeParams{
+ factory: testBlockFactory{Owner: 0},
+ validator: testBlockValidator{},
+ keys: keyManager,
+ ledger: ledger,
+ voteVerifier: MakeAsyncVoteVerifier(nil),
+ log: sLogger,
+ monitor: nil,
+ }).(asyncPseudonode)
+ // verify start condition -
+ require.Zero(t, pb.participationKeysRound)
+ require.Empty(t, pb.participationKeys)
+
+ // check after round 1
+ pb.loadRoundParticipationKeys(basics.Round(1))
+ require.Equal(t, basics.Round(1), pb.participationKeysRound)
+ require.NotEmpty(t, pb.participationKeys)
+
+ // check the participationKeys retain their previous value after a call to loadRoundParticipationKeys with 1.
+ pb.participationKeys = nil
+ pb.loadRoundParticipationKeys(basics.Round(1))
+ require.Equal(t, basics.Round(1), pb.participationKeysRound)
+ require.Nil(t, pb.participationKeys)
+
+ // check that it's being updated when asked with a different round number.
+ returnedPartKeys := pb.loadRoundParticipationKeys(basics.Round(2))
+ require.Equal(t, basics.Round(2), pb.participationKeysRound)
+ require.NotEmpty(t, pb.participationKeys)
+ require.Equal(t, pb.participationKeys, returnedPartKeys)
+
+ // test to see that loadRoundParticipationKeys is calling VotingKeys with the correct parameters.
+ keyManagerProxy := &KeyManagerProxy{}
+ pb.keys = keyManagerProxy
+ cparams, _ := ledger.ConsensusParams(0)
+ for rnd := basics.Round(3); rnd < 1000; rnd += 43 {
+ keyManagerProxy.target = func(votingRound, balanceRnd basics.Round) []account.Participation {
+ require.Equal(t, rnd, votingRound)
+ require.Equal(t, balanceRound(rnd, cparams), balanceRnd)
+ return keyManager.VotingKeys(votingRound, balanceRnd)
+ }
+ pb.loadRoundParticipationKeys(basics.Round(rnd))
+ }
+}
diff --git a/agreement/service_test.go b/agreement/service_test.go
index 464478b2c..f3db45a85 100644
--- a/agreement/service_test.go
+++ b/agreement/service_test.go
@@ -106,23 +106,14 @@ func (c *testingClock) fire(d time.Duration) {
type simpleKeyManager []account.Participation
-func (m simpleKeyManager) Keys() []account.Participation {
+func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
var km []account.Participation
for _, acc := range m {
- km = append(km, acc)
- }
- return km
-}
-
-// HasLiveKeys returns true if we have any Participation
-// keys valid for the specified round range (inclusive)
-func (m simpleKeyManager) HasLiveKeys(from, to basics.Round) bool {
- for _, acc := range m {
- if acc.OverlapsInterval(from, to) {
- return true
+ if acc.OverlapsInterval(votingRound, votingRound) {
+ km = append(km, acc)
}
}
- return false
+ return km
}
func (m simpleKeyManager) DeleteOldKeys(basics.Round) {
@@ -678,21 +669,12 @@ func createTestAccountsAndBalances(t *testing.T, numNodes int, rootSeed []byte)
// save partkeys to db
{
- partAccess, err := db.MakeAccessor(t.Name()+"part"+strconv.Itoa(i+off), false, true)
- if err != nil {
- panic(err)
- }
accounts[i] = account.Participation{
Parent: rootAddress,
VRF: generatePseudoRandomVRF(i),
Voting: v,
FirstValid: firstValid,
LastValid: lastValid,
- Store: partAccess,
- }
- err = accounts[i].Persist()
- if err != nil {
- panic(err)
}
}
diff --git a/buildnumber.dat b/buildnumber.dat
index 1e8b31496..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-6
+0
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index efe577768..c6bdf8ac0 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -101,12 +101,6 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat
net: net,
ledger: l,
config: cfg,
- blocksDownloadPeerSelector: makePeerSelector(
- net,
- []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
- }),
}
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
@@ -116,7 +110,7 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat
if err != nil {
return nil, err
}
-
+ service.initDownloadPeerSelector()
return service, nil
}
@@ -138,17 +132,12 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo
net: net,
ledger: l,
config: cfg,
- blocksDownloadPeerSelector: makePeerSelector(
- net,
- []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
- }),
}
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
}
+ service.initDownloadPeerSelector()
return service, nil
}
@@ -350,6 +339,8 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
if ledgerBlock, err := cs.ledger.Block(blockRound); err == nil {
blk = &ledgerBlock
}
+ var protoParams config.ConsensusParams
+ var ok bool
for {
attemptsCount++
@@ -367,7 +358,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
}
// check block protocol version support.
- if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
+ if protoParams, ok = config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
cs.log.Warnf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
@@ -379,6 +370,18 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
return cs.abort(fmt.Errorf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
+ // We need to compare explicitly the genesis hash since we're not doing any block validation. This would ensure the genesis.json file matches the block that we've received.
+ if protoParams.SupportGenesisHash && blk.GenesisHash() != cs.ledger.GenesisHash() {
+ cs.log.Warnf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash())
+ if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
+ // try again.
+ blk = nil
+ cs.blocksDownloadPeerSelector.RankPeer(peer, peerRankInvalidDownload)
+ continue
+ }
+ return cs.abort(fmt.Errorf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash()))
+ }
+
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
cs.log.Warnf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header")
@@ -595,9 +598,8 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui
}
return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock: recurring non-HTTP peer was provided by the peer selector"))
}
- fetcher := makeHTTPFetcher(cs.log, httpPeer, cs.net, &cs.config)
- blockDownloadStartTime := time.Now()
- blk, _, err = fetcher.FetchBlock(cs.ctx, round)
+ fetcher := makeUniversalBlockFetcher(cs.log, cs.net, cs.config)
+ blk, _, downloadDuration, err = fetcher.fetchBlock(cs.ctx, round, httpPeer)
if err != nil {
if cs.ctx.Err() != nil {
return nil, time.Duration(0), peer, true, cs.stopOrAbort()
@@ -611,8 +613,6 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui
return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock failed after multiple blocks download attempts"))
}
// success
- fetcher.Close()
- downloadDuration = time.Now().Sub(blockDownloadStartTime)
return blk, downloadDuration, peer, false, nil
}
@@ -716,3 +716,20 @@ func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(aquiredBlocks
cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + aquiredBlocksDelta)
cs.stats.VerifiedBlocks = uint64(int64(cs.stats.VerifiedBlocks) + verifiedBlocksDelta)
}
+
+func (cs *CatchpointCatchupService) initDownloadPeerSelector() {
+ if cs.config.EnableCatchupFromArchiveServers {
+ cs.blocksDownloadPeerSelector = makePeerSelector(
+ cs.net,
+ []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+ })
+ } else {
+ cs.blocksDownloadPeerSelector = makePeerSelector(
+ cs.net,
+ []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookRelays},
+ })
+ }
+}
diff --git a/catchup/fetcher.go b/catchup/fetcher.go
deleted file mode 100644
index c1f2be604..000000000
--- a/catchup/fetcher.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package catchup
-
-import (
- "context"
- "errors"
- "fmt"
- "math/rand"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
-)
-
-// Fetcher queries the current block of the network, and fetches agreed-upon blocks
-type Fetcher interface {
- // FetchBlock fetches a block for a given round.
- FetchBlock(ctx context.Context, r basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error)
-
- // Whether the fetcher has anyone available to ask for the block associated with round
- OutOfPeers(round basics.Round) bool
-
- // NumPeers return the number of peers that this fetcher has available
- NumPeers() int
-
- // Close cleans up this fetcher
- Close()
-}
-
-// FetcherFactory creates fetchers
-type FetcherFactory interface {
- // Create a new fetcher
- New() Fetcher
- // Create a new fetcher that also fetches from backup peers over gossip network utilising given message tag
- NewOverGossip() Fetcher
-}
-
-// NetworkFetcherFactory creates network fetchers
-type NetworkFetcherFactory struct {
- net network.GossipNode
- peerLimit int
- cfg *config.Local
-
- log logging.Logger
-}
-
-func (factory NetworkFetcherFactory) makeHTTPFetcherFromPeer(log logging.Logger, peer network.Peer) FetcherClient {
- hp, ok := peer.(network.HTTPPeer)
- if ok {
- return MakeHTTPFetcher(log, hp, factory.net, factory.cfg)
- }
- log.Errorf("%T %#v is not HTTPPeer", peer, peer)
- return nil
-}
-
-// MakeNetworkFetcherFactory returns a network fetcher factory, that associates fetchers with no more than peerLimit peers from the aggregator.
-// WSClientSource can be nil, if no network exists to create clients from (defaults to http clients)
-func MakeNetworkFetcherFactory(net network.GossipNode, peerLimit int, cfg *config.Local) NetworkFetcherFactory {
- var factory NetworkFetcherFactory
- factory.net = net
- factory.peerLimit = peerLimit
- factory.log = logging.Base()
- factory.cfg = cfg
- return factory
-}
-
-// BuildFetcherClients returns a set of clients we can fetch blocks from
-func (factory NetworkFetcherFactory) BuildFetcherClients() []FetcherClient {
- peers := factory.net.GetPeers(network.PeersPhonebookRelays)
- factory.log.Debugf("%d outgoing peers", len(peers))
- if len(peers) == 0 {
- factory.log.Warn("no outgoing peers for BuildFetcherClients")
- return nil
- }
- out := make([]FetcherClient, 0, len(peers))
- for _, peer := range peers {
- fetcher := factory.makeHTTPFetcherFromPeer(factory.log, peer)
- if fetcher != nil {
- out = append(out, fetcher)
- }
- }
- return out
-}
-
-// New returns a new fetcher
-func (factory NetworkFetcherFactory) New() Fetcher {
- return &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: factory.BuildFetcherClients(),
- log: logging.Base(),
- }
-}
-
-// NewOverGossip returns a fetcher using the given message tag.
-// If there are gossip peers, then it returns a fetcher over gossip
-// Otherwise, it returns an HTTP fetcher
-func (factory NetworkFetcherFactory) NewOverGossip() Fetcher {
- gossipPeers := factory.net.GetPeers(network.PeersConnectedIn)
- factory.log.Debugf("%d gossip peers", len(gossipPeers))
- if len(gossipPeers) == 0 {
- factory.log.Info("no gossip peers for NewOverGossip")
- return factory.New()
- }
- f := MakeWsFetcher(factory.log, gossipPeers, factory.cfg)
- return &ComposedFetcher{fetchers: []Fetcher{factory.New(), f}}
-}
-
-// NetworkFetcher fetches data from remote RPC clients
-type NetworkFetcher struct {
- roundUpperBound map[FetcherClient]basics.Round
- activeFetches map[FetcherClient]int
- peers []FetcherClient
- mu deadlock.RWMutex
- log logging.Logger
-}
-
-func (networkFetcher *NetworkFetcher) availablePeers(round basics.Round) []FetcherClient {
- // filter clients who don't claim to have the round we want, and
- // return clients that have the fewest active fetches right now.
- minActiveFetches := -1
- for client, activeFetches := range networkFetcher.activeFetches {
- roundUpperBound, exists := networkFetcher.roundUpperBound[client]
- if exists && round >= roundUpperBound {
- continue
- }
-
- if minActiveFetches == -1 {
- minActiveFetches = activeFetches
- }
- if activeFetches < minActiveFetches {
- minActiveFetches = activeFetches
- }
- }
-
- pool := make([]FetcherClient, 0)
- for _, client := range networkFetcher.peers {
- activeFetches, exists := networkFetcher.activeFetches[client]
- if exists && activeFetches > minActiveFetches && minActiveFetches != -1 {
- continue
- }
- if roundUpperBound, exists := networkFetcher.roundUpperBound[client]; !exists || round < roundUpperBound {
- // client doesn't have this block
- pool = append(pool, client)
- }
- }
-
- return pool
-}
-
-func (networkFetcher *NetworkFetcher) selectClient(r basics.Round) (FetcherClient, error) {
- networkFetcher.mu.Lock()
- defer networkFetcher.mu.Unlock()
-
- availableClients := networkFetcher.availablePeers(r)
- if len(availableClients) == 0 {
- return nil, errors.New("no peers to ask")
- }
-
- // select one of the peers at random
- i := rand.Uint64() % uint64(len(availableClients))
- client := availableClients[i]
- networkFetcher.activeFetches[client] = networkFetcher.activeFetches[client] + 1
- return client, nil
-}
-
-func (networkFetcher *NetworkFetcher) releaseClient(client FetcherClient) {
- networkFetcher.mu.Lock()
- defer networkFetcher.mu.Unlock()
- networkFetcher.activeFetches[client] = networkFetcher.activeFetches[client] - 1
-}
-
-func (networkFetcher *NetworkFetcher) markPeerLastRound(client FetcherClient, round basics.Round) {
- networkFetcher.mu.Lock()
- defer networkFetcher.mu.Unlock()
-
- currentLastRound, hasBound := networkFetcher.roundUpperBound[client]
- if !hasBound || currentLastRound > round {
- networkFetcher.roundUpperBound[client] = round
- }
-}
-
-// FetchBlock returns a block for round r
-func (networkFetcher *NetworkFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) {
- client, err := networkFetcher.selectClient(r)
- if err != nil {
- return
- }
- defer networkFetcher.releaseClient(client)
- networkFetcher.log.Infof("networkFetcher.FetchBlock: asking client %v for block %v", client.Address(), r)
-
- fetchedBuf, err := client.GetBlockBytes(ctx, r)
- if err != nil {
- networkFetcher.markPeerLastRound(client, r)
- err = fmt.Errorf("Peer %v: %v", client.Address(), err)
- return
- }
- block, cert, err := processBlockBytes(fetchedBuf, r, client.Address())
- if err != nil {
- networkFetcher.markPeerLastRound(client, r)
- return
- }
- return block, cert, client, nil
-}
-
-// NumPeers return the number of peers that this fetcher has available
-func (networkFetcher *NetworkFetcher) NumPeers() int {
- networkFetcher.mu.RLock()
- defer networkFetcher.mu.RUnlock()
-
- return len(networkFetcher.peers)
-}
-
-// OutOfPeers returns whether there are any peers that may have the block of a particular round
-func (networkFetcher *NetworkFetcher) OutOfPeers(round basics.Round) bool {
- networkFetcher.mu.RLock()
- defer networkFetcher.mu.RUnlock()
-
- return len(networkFetcher.availablePeers(round)) == 0
-}
-
-// Close implements Fetcher. Nothing to clean up here.
-func (networkFetcher *NetworkFetcher) Close() {}
-
-// ComposedFetcher wraps multiple fetchers in some priority order
-type ComposedFetcher struct {
- fetchers []Fetcher // ordered by priority
-}
-
-// NumPeers implements Fetcher.NumPeers
-func (cf *ComposedFetcher) NumPeers() int {
- g := 0
- for _, f := range cf.fetchers {
- g += f.NumPeers()
- }
- return g
-}
-
-// OutOfPeers implements Fetcher.OutOfPeers
-func (cf *ComposedFetcher) OutOfPeers(round basics.Round) bool {
- for _, f := range cf.fetchers {
- if !f.OutOfPeers(round) {
- return false
- }
- }
- return true
-}
-
-// FetchBlock implements Fetcher.FetchBlock
-func (cf *ComposedFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) {
- for _, f := range cf.fetchers {
- if f.OutOfPeers(r) {
- continue
- }
- return f.FetchBlock(ctx, r)
- }
- err = errors.New("no peers in any fetchers")
- return
-}
-
-// Close implements Fetcher.Close
-func (cf *ComposedFetcher) Close() {
- for _, f := range cf.fetchers {
- f.Close()
- }
-}
-
-/* Utils */
-
-func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) {
- var decodedEntry rpcs.EncodedBlockCert
- err = protocol.Decode(fetchedBuf, &decodedEntry)
- if err != nil {
- err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err)
- return
- }
-
- if decodedEntry.Block.Round() != r {
- err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong block from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Block.Round())
- return
- }
-
- if decodedEntry.Certificate.Round != r {
- err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong cert from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Certificate.Round)
- return
- }
- return &decodedEntry.Block, &decodedEntry.Certificate, nil
-}
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
index fc56eab52..fd48d0902 100644
--- a/catchup/fetcher_test.go
+++ b/catchup/fetcher_test.go
@@ -18,14 +18,11 @@ package catchup
import (
"context"
- "errors"
"net"
"net/http"
- "net/rpc"
"net/url"
"strings"
"testing"
- "time"
"github.com/gorilla/mux"
"github.com/stretchr/testify/require"
@@ -41,384 +38,9 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
- "github.com/algorand/go-algorand/util/bloom"
)
-type mockRunner struct {
- ran bool
- done chan *rpc.Call
- failWithNil bool
- failWithError bool
- txgroups [][]transactions.SignedTxn
-}
-
-type mockRPCClient struct {
- client *mockRunner
- closed bool
- rootURL string
- log logging.Logger
-}
-
-func (client *mockRPCClient) Close() error {
- client.closed = true
- return nil
-}
-
-func (client *mockRPCClient) Address() string {
- return "mock.address."
-}
-func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups [][]transactions.SignedTxn, err error) {
- client.log.Info("MockRPCClient.Sync")
- select {
- case <-ctx.Done():
- return nil, errors.New("cancelled")
- default:
- }
- if client.client.failWithNil {
- return nil, errors.New("old failWithNil")
- }
- if client.client.failWithError {
- return nil, errors.New("failing call")
- }
- return client.client.txgroups, nil
-}
-func (client *mockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- return nil, nil
-}
-
-// network.HTTPPeer interface
-func (client *mockRPCClient) GetAddress() string {
- return client.rootURL
-}
-func (client *mockRPCClient) GetHTTPClient() *http.Client {
- return nil
-}
-
-type mockClientAggregator struct {
- mocks.MockNetwork
- peers []network.Peer
-}
-
-func (mca *mockClientAggregator) GetPeers(options ...network.PeerOption) []network.Peer {
- return mca.peers
-}
-
-const numberOfPeers = 10
-
-func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool) *mockClientAggregator {
- clients := make([]network.Peer, 0)
- for i := 0; i < numberOfPeers; i++ {
- runner := mockRunner{failWithNil: failWithNil, failWithError: failWithError, done: make(chan *rpc.Call)}
- clients = append(clients, &mockRPCClient{client: &runner, log: logging.TestingLog(t)})
- }
- t.Logf("len(mca.clients) = %d", len(clients))
- return &mockClientAggregator{peers: clients}
-}
-
-func getAllClientsSelectedForRound(t *testing.T, fetcher *NetworkFetcher, round basics.Round) map[FetcherClient]basics.Round {
- selected := make(map[FetcherClient]basics.Round, 0)
- for i := 0; i < 1000; i++ {
- c, err := fetcher.selectClient(round)
- if err != nil {
- return selected
- }
- selected[c.(FetcherClient)] = fetcher.roundUpperBound[c]
- }
- return selected
-}
-
-func TestSelectValidRemote(t *testing.T) {
- network := makeMockClientAggregator(t, false, false)
- cfg := config.GetDefaultLocal()
- factory := MakeNetworkFetcherFactory(network, numberOfPeers, &cfg)
- factory.log = logging.TestingLog(t)
- fetcher := factory.New()
- require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).peers))
-
- var oldClient FetcherClient
- var newClient FetcherClient
- i := 0
- for _, client := range fetcher.(*NetworkFetcher).peers {
- if i == 0 {
- oldClient = client
- r := basics.Round(2)
- fetcher.(*NetworkFetcher).roundUpperBound[client] = r
- } else if i == 1 {
- newClient = client
- r := basics.Round(4)
- fetcher.(*NetworkFetcher).roundUpperBound[client] = r
- } else if i > 2 {
- r := basics.Round(3)
- fetcher.(*NetworkFetcher).roundUpperBound[client] = r
- } // skip i == 2
- i++
- }
-
- require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).availablePeers(1)))
- selected := getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 1)
- require.Equal(t, numberOfPeers, len(selected))
- _, hasOld := selected[oldClient]
- require.True(t, hasOld)
-
- _, hasNew := selected[newClient]
- require.True(t, hasNew)
-
- require.Equal(t, numberOfPeers-1, len(fetcher.(*NetworkFetcher).availablePeers(2)))
- selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 2)
- require.Equal(t, numberOfPeers-1, len(selected))
- _, hasOld = selected[oldClient]
- require.False(t, hasOld)
- _, hasNew = selected[newClient]
- require.True(t, hasNew)
-
- require.Equal(t, 2, len(fetcher.(*NetworkFetcher).availablePeers(3)))
- selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 3)
- require.Equal(t, 2, len(selected))
- _, hasOld = selected[oldClient]
- require.False(t, hasOld)
- _, hasNew = selected[newClient]
- require.True(t, hasNew)
-
- require.Equal(t, 1, len(fetcher.(*NetworkFetcher).availablePeers(4)))
- selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 4)
- require.Equal(t, 1, len(selected))
- _, hasOld = selected[oldClient]
- require.False(t, hasOld)
- _, hasNew = selected[newClient]
- require.False(t, hasNew)
-}
-
-type dummyFetcher struct {
- failWithNil bool
- failWithError bool
- fetchTimeout time.Duration
-}
-
-// FetcherClient interface
-func (df *dummyFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- if df.failWithNil {
- return nil, nil
- }
- if df.failWithError {
- return nil, errors.New("failing call")
- }
-
- timer := time.NewTimer(df.fetchTimeout)
- defer timer.Stop()
-
- // Fill in the dummy response with the correct round
- dummyBlock := rpcs.EncodedBlockCert{
- Block: bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: r,
- },
- },
- Certificate: agreement.Certificate{
- Round: r,
- },
- }
-
- encodedData := protocol.Encode(&dummyBlock)
-
- select {
- case <-timer.C:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
-
- return encodedData, nil
-}
-
-// FetcherClient interface
-func (df *dummyFetcher) Address() string {
- //logging.Base().Debug("dummyFetcher Address")
- return "dummyFetcher address"
-}
-
-// FetcherClient interface
-func (df *dummyFetcher) Close() error {
- //logging.Base().Debug("dummyFetcher Close")
- return nil
-}
-
-func makeDummyFetchers(failWithNil bool, failWithError bool, timeout time.Duration) []FetcherClient {
- out := make([]FetcherClient, numberOfPeers)
- for i := range out {
- out[i] = &dummyFetcher{failWithNil, failWithError, timeout}
- }
- return out
-}
-
-func TestFetchBlock(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 100*time.Millisecond),
- log: logging.TestingLog(t),
- }
-
- var err error
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- fetched := false
- for i := 0; i < numberOfPeers; i++ {
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NoError(t, err)
- require.NotNil(t, client)
- end := time.Now()
- require.True(t, end.Sub(start) > 100*time.Millisecond)
- require.True(t, end.Sub(start) < 100*time.Millisecond+5*time.Second) // we want to have a higher margin here, as the machine we're running on might be slow.
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NotNil(t, client)
- require.NoError(t, err)
- fetched = true
- }
- }
- require.True(t, fetched)
-}
-
-func TestFetchBlockFail(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(true, false, 100*time.Millisecond),
- log: logging.TestingLog(t),
- }
-
- for i := 0; i < numberOfPeers; i++ {
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.Error(t, err)
- }
- require.True(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
-}
-
-func TestFetchBlockAborted(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 2*time.Second),
- log: logging.TestingLog(t),
- }
-
- ctx, cf := context.WithCancel(context.Background())
- defer cf()
- go func() {
- cf()
- }()
- start := time.Now()
- _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1))
- end := time.Now()
- require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
- require.Nil(t, client)
- require.True(t, end.Sub(start) < 10*time.Second)
-}
-
-func TestFetchBlockTimeout(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 10*time.Second),
- log: logging.TestingLog(t),
- }
- start := time.Now()
- ctx, cf := context.WithTimeout(context.Background(), 500*time.Millisecond)
- defer cf()
- _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1))
- end := time.Now()
- require.True(t, strings.Contains(err.Error(), context.DeadlineExceeded.Error()))
- require.Nil(t, client)
- require.True(t, end.Sub(start) >= 500*time.Millisecond)
- require.True(t, end.Sub(start) < 10*time.Second)
-}
-
-func TestFetchBlockErrorCall(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, true, 10*time.Millisecond),
- log: logging.TestingLog(t),
- }
-
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.Error(t, err)
- require.Nil(t, client)
-}
-
-func TestFetchBlockComposedNoOp(t *testing.T) {
- f := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 1*time.Millisecond),
- log: logging.TestingLog(t),
- }
- fetcher := &ComposedFetcher{fetchers: []Fetcher{f, nil}}
-
- var err error
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- fetched := false
- for i := 0; i < numberOfPeers; i++ {
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NoError(t, err)
- require.NotNil(t, client)
- end := time.Now()
- require.True(t, end.Sub(start) >= 1*time.Millisecond)
- require.True(t, end.Sub(start) < 1*time.Millisecond+10*time.Second) // we take a very high margin here for the fetcher to complete.
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NotNil(t, client)
- require.NoError(t, err)
- fetched = true
- }
- }
- require.True(t, fetched)
-}
-
-// Make sure composed fetchers are hit in priority order
-func TestFetchBlockComposedFail(t *testing.T) {
- f := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(true, false, 1*time.Millisecond),
- log: logging.TestingLog(t),
- }
- f2 := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 1*time.Millisecond),
- log: logging.TestingLog(t),
- }
- fetcher := &ComposedFetcher{fetchers: []Fetcher{f, f2}}
-
- for i := 0; i < numberOfPeers; i++ {
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.Error(t, err)
- }
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- for i := 0; i < numberOfPeers; i++ {
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NotNil(t, client)
- require.NoError(t, err)
- }
-}
-
-func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) {
+func buildTestLedger(t *testing.T, blk bookkeeping.Block) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) {
var user basics.Address
user[0] = 123
@@ -426,15 +48,15 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo
genesis := make(map[basics.Address]basics.AccountData)
genesis[user] = basics.AccountData{
Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000},
}
genesis[sinkAddr] = basics.AccountData{
Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000},
}
genesis[poolAddr] = basics.AccountData{
Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000},
}
log := logging.TestingLog(t)
@@ -472,6 +94,8 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo
prev, err := ledger.Block(ledger.LastRound())
require.NoError(t, err)
+ b = blk
+ b.BlockHeader.RewardsState.RewardsPool = poolAddr
b.RewardsLevel = prev.RewardsLevel
b.BlockHeader.Round = next
b.BlockHeader.GenesisHash = genHash
@@ -481,11 +105,29 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo
b.Payset = []transactions.SignedTxnInBlock{
txib,
}
-
+ b.TxnRoot, err = b.PaysetCommit()
+ require.NoError(t, err)
require.NoError(t, ledger.AddBlock(b, agreement.Certificate{Round: next}))
return
}
+func addBlocks(t *testing.T, ledger *data.Ledger, blk bookkeeping.Block, numBlocks int) {
+ var err error
+ for i := 0; i < numBlocks; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.TxnRoot, err = blk.PaysetCommit()
+ require.NoError(t, err)
+
+ err := ledger.AddBlock(blk, agreement.Certificate{Round: blk.BlockHeader.Round})
+ require.NoError(t, err)
+
+ hdr, err := ledger.BlockHdr(blk.BlockHeader.Round)
+ require.NoError(t, err)
+ require.Equal(t, blk.BlockHeader, hdr)
+ }
+}
+
type basicRPCNode struct {
listener net.Listener
server http.Server
@@ -572,54 +214,6 @@ func (s *httpTestPeerSource) addPeer(rootURL string) {
s.peers = append(s.peers, &peer)
}
-// Build a ledger with genesis and one block, start an HTTPServer around it, use NetworkFetcher to fetch the block.
-// For smaller test, see blockService_test.go TestGetBlockHTTP
-// todo - fix this one
-func TestGetBlockHTTP(t *testing.T) {
- // start server
- ledger, next, b, err := buildTestLedger(t)
- if err != nil {
- t.Fatal(err)
- return
- }
- net := &httpTestPeerSource{}
- ls := rpcs.MakeBlockService(config.GetDefaultLocal(), ledger, net, "test genesisID")
-
- nodeA := basicRPCNode{}
- nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
- nodeA.start()
- defer nodeA.stop()
- rootURL := nodeA.rootURL()
-
- // run fetcher
- net.addPeer(rootURL)
- _, ok := net.GetPeers(network.PeersConnectedOut)[0].(network.HTTPPeer)
- require.True(t, ok)
- cfg := config.GetDefaultLocal()
- factory := MakeNetworkFetcherFactory(net, numberOfPeers, &cfg)
- factory.log = logging.TestingLog(t)
- fetcher := factory.New()
- // we have one peer, the HTTP block server
- require.Equal(t, len(fetcher.(*NetworkFetcher).peers), 1)
-
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
- end := time.Now()
- require.NotNil(t, client)
- require.NoError(t, err)
-
- require.True(t, end.Sub(start) < 10*time.Second)
- require.Equal(t, &b, block)
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- }
-}
-
func nodePair() (*basicRPCNode, *basicRPCNode) {
nodeA := &basicRPCNode{}
nodeA.start()
@@ -632,143 +226,6 @@ func nodePair() (*basicRPCNode, *basicRPCNode) {
return nodeA, nodeB
}
-func TestGetBlockMocked(t *testing.T) {
- var user basics.Address
- user[0] = 123
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesis := make(map[basics.Address]basics.AccountData)
- genesis[user] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[sinkAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[poolAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
-
- log := logging.TestingLog(t)
- // A network with two nodes, A and B
- nodeA, nodeB := nodePair()
- defer nodeA.stop()
- defer nodeB.stop()
-
- // A is running the ledger service and will respond to fetch requests
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- ledgerA, err := data.LoadLedger(
- log.With("name", "A"), t.Name(), inMem,
- protocol.ConsensusCurrentVersion, genBal, "", crypto.Digest{},
- nil, cfg,
- )
- if err != nil {
- t.Errorf("Couldn't make ledger: %v", err)
- }
- blockServiceConfig := config.GetDefaultLocal()
- blockServiceConfig.EnableBlockService = true
- rpcs.MakeBlockService(blockServiceConfig, ledgerA, nodeA, "test genesisID")
-
- next := ledgerA.NextRound()
- genHash := crypto.Digest{0x42}
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: user,
- Fee: basics.MicroAlgos{Raw: proto.MinTxnFee},
- FirstValid: next,
- LastValid: next,
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: user,
- Amount: basics.MicroAlgos{Raw: 2},
- },
- }
- signedtx := transactions.SignedTxn{
- Txn: tx,
- }
-
- var b bookkeeping.Block
- prev, err := ledgerA.Block(ledgerA.LastRound())
- require.NoError(t, err)
- b.RewardsLevel = prev.RewardsLevel
- b.BlockHeader.Round = next
- b.BlockHeader.GenesisHash = genHash
- b.CurrentProtocol = protocol.ConsensusCurrentVersion
- txib, err := b.EncodeSignedTxn(signedtx, transactions.ApplyData{})
- require.NoError(t, err)
- b.Payset = []transactions.SignedTxnInBlock{
- txib,
- }
- require.NoError(t, ledgerA.AddBlock(b, agreement.Certificate{Round: next}))
-
- // B tries to fetch block
- factory := MakeNetworkFetcherFactory(nodeB, 10, &cfg)
- factory.log = logging.TestingLog(t)
- nodeBRPC := factory.New()
- ctx, cf := context.WithTimeout(context.Background(), time.Second)
- defer cf()
- eblock, _, _, err := nodeBRPC.FetchBlock(ctx, next)
- if err != nil {
- require.Failf(t, "Error fetching block", "%v", err)
- }
- block, err := ledgerA.Block(next)
- require.NoError(t, err)
- if eblock.Hash() != block.Hash() {
- t.Errorf("FetchBlock returned wrong block: expected %v; got %v", block.Hash(), eblock)
- }
-}
-
-func TestGetFutureBlock(t *testing.T) {
- log := logging.TestingLog(t)
- // A network with two nodes, A and B
- nodeA, nodeB := nodePair()
- defer nodeA.stop()
- defer nodeB.stop()
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesis := make(map[basics.Address]basics.AccountData)
- genesis[sinkAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[poolAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
-
- gen := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
- // A is running the ledger service and will respond to fetch requests
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- ledgerA, err := data.LoadLedger(
- log.With("name", "A"), t.Name(), inMem,
- protocol.ConsensusCurrentVersion, gen, "", crypto.Digest{},
- nil, cfg,
- )
- if err != nil {
- t.Errorf("Couldn't make ledger: %v", err)
- }
- rpcs.MakeBlockService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID")
-
- // B tries to fetch block 4
- factory := MakeNetworkFetcherFactory(nodeB, 10, &cfg)
- factory.log = logging.TestingLog(t)
- nodeBRPC := factory.New()
- ctx, cf := context.WithTimeout(context.Background(), time.Second)
- defer cf()
- _, _, client, err := nodeBRPC.FetchBlock(ctx, ledgerA.NextRound())
- require.Error(t, err)
- require.Nil(t, client)
-}
-
// implement network.UnicastPeer
type testUnicastPeer struct {
gn network.GossipNode
@@ -839,68 +296,11 @@ func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.
return nil
}
-func makeTestUnicastPeer(gn network.GossipNode, version string, t *testing.T) network.UnicastPeer {
+func makeTestUnicastPeer(gn network.GossipNode, t *testing.T) network.UnicastPeer {
wsp := testUnicastPeer{}
wsp.gn = gn
wsp.t = t
- wsp.version = version
+ wsp.version = network.ProtocolVersion
wsp.responseChannels = make(map[uint64]chan *network.Response)
return &wsp
}
-
-// A quick GetBlock over websockets test hitting a mocked websocket server (no actual connection)
-func TestGetBlockWS(t *testing.T) {
- // test the WS fetcher:
- // 1. fetcher sends UniEnsBlockReqTag to http peer
- // 2. peer send message to gossip node
- // 3. gossip node send message to ledger service
- // 4. ledger service responds with UniCatchupResTag sending it back to the http peer
- // 5. the http peer send it to the network
- // 6. the network send it back to the fetcher
-
- // start server
- ledger, next, b, err := buildTestLedger(t)
- if err != nil {
- t.Fatal(err)
- return
- }
-
- cfg := config.GetDefaultLocal()
-
- versions := []string{"2.1"}
- for _, version := range versions { // range network.SupportedProtocolVersions {
-
- net := &httpTestPeerSource{}
- blockServiceConfig := config.GetDefaultLocal()
- blockServiceConfig.CatchupParallelBlocks = 5
- blockServiceConfig.EnableBlockService = true
- ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID")
-
- ls.Start()
-
- up := makeTestUnicastPeer(net, version, t)
- net.peers = append(net.peers, up)
-
- _, ok := net.GetPeers(network.PeersConnectedIn)[0].(network.UnicastPeer)
- require.True(t, ok)
- factory := MakeNetworkFetcherFactory(net, numberOfPeers, &cfg)
- factory.log = logging.TestingLog(t)
- fetcher := factory.NewOverGossip()
- // we have one peer, the Ws block server
- require.Equal(t, fetcher.NumPeers(), 1)
-
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
- require.NotNil(t, client)
- require.NoError(t, err)
- require.Equal(t, &b, block)
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- }
- fetcher.Close()
- }
-}
diff --git a/catchup/httpFetcher.go b/catchup/httpFetcher.go
deleted file mode 100644
index 8d06b2225..000000000
--- a/catchup/httpFetcher.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package catchup
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "path"
- "strconv"
- "time"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/rpcs"
-)
-
-// set max fetcher size to 5MB, this is enough to fit the block and certificate
-const fetcherMaxBlockBytes = 5 << 20
-
-var errNoBlockForRound = errors.New("No block available for given round")
-
-// FetcherClient abstracts how to GetBlockBytes from a node on the net.
-type FetcherClient interface {
- GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error)
- Address() string
- Close() error
-}
-
-// HTTPFetcher implements FetcherClient doing an HTTP GET of the block
-type HTTPFetcher struct {
- peer network.HTTPPeer
- rootURL string
- net network.GossipNode
-
- client *http.Client
-
- log logging.Logger
- config *config.Local
-}
-
-// MakeHTTPFetcher wraps an HTTPPeer so that we can get blocks from it, and return the FetcherClient interface
-func MakeHTTPFetcher(log logging.Logger, peer network.HTTPPeer, net network.GossipNode, cfg *config.Local) (fc FetcherClient) {
- return makeHTTPFetcher(log, peer, net, cfg)
-}
-
-// makeHTTPFetcher wraps an HTTPPeer so that we can get blocks from it, and returns a HTTPFetcher object.
-func makeHTTPFetcher(log logging.Logger, peer network.HTTPPeer, net network.GossipNode, cfg *config.Local) *HTTPFetcher {
- return &HTTPFetcher{
- peer: peer,
- rootURL: peer.GetAddress(),
- net: net,
- client: peer.GetHTTPClient(),
- log: log,
- config: cfg}
-}
-
-// GetBlockBytes gets a block.
-// Core piece of FetcherClient interface
-func (hf *HTTPFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- parsedURL, err := network.ParseHostOrURL(hf.rootURL)
- if err != nil {
- return nil, err
- }
-
- parsedURL.Path = hf.net.SubstituteGenesisID(path.Join(parsedURL.Path, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(r), 36)))
- blockURL := parsedURL.String()
- hf.log.Debugf("block GET %#v peer %#v %T", blockURL, hf.peer, hf.peer)
- request, err := http.NewRequest("GET", blockURL, nil)
- if err != nil {
- return nil, err
- }
- requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(hf.config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
- defer requestCancel()
- request = request.WithContext(requestCtx)
- network.SetUserAgentHeader(request.Header)
- response, err := hf.client.Do(request)
- if err != nil {
- hf.log.Debugf("GET %#v : %s", blockURL, err)
- return nil, err
- }
-
- // check to see that we had no errors.
- switch response.StatusCode {
- case http.StatusOK:
- case http.StatusNotFound: // server could not find a block with that round numbers.
- response.Body.Close()
- return nil, errNoBlockForRound
- default:
- bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes)
- hf.log.Warn("HTTPFetcher.GetBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes))
- if err == nil {
- err = fmt.Errorf("GetBlockBytes error response status code %d when requesting '%s'. Response body '%s'", response.StatusCode, blockURL, string(bodyBytes))
- } else {
- err = fmt.Errorf("GetBlockBytes error response status code %d when requesting '%s'. %w", response.StatusCode, blockURL, err)
- }
- return nil, err
- }
-
- // at this point, we've already receieved the response headers. ensure that the
- // response content type is what we'd like it to be.
- contentTypes := response.Header["Content-Type"]
- if len(contentTypes) != 1 {
- err = fmt.Errorf("http block fetcher invalid content type count %d", len(contentTypes))
- hf.log.Warn(err)
- response.Body.Close()
- return nil, err
- }
-
- // TODO: Temporarily allow old and new content types so we have time for lazy upgrades
- // Remove this 'old' string after next release.
- const blockResponseContentTypeOld = "application/algorand-block-v1"
- if contentTypes[0] != rpcs.BlockResponseContentType && contentTypes[0] != blockResponseContentTypeOld {
- hf.log.Warnf("http block fetcher response has an invalid content type : %s", contentTypes[0])
- response.Body.Close()
- return nil, fmt.Errorf("http block fetcher invalid content type '%s'", contentTypes[0])
- }
-
- return rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes)
-}
-
-// Address is part of FetcherClient interface.
-// Returns the root URL of the connected peer.
-func (hf *HTTPFetcher) Address() string {
- return hf.rootURL
-}
-
-// Close is part of FetcherClient interface
-//
-// Does nothing, leaves underlying client open because other HTTP
-// requests from other interfaces could be open on it. Somewhere a
-// Peer owns that connection and will close as needed.
-func (hf *HTTPFetcher) Close() error {
- return nil
-}
-
-// FetchBlock is a copy of the functionality in NetworkFetcher.FetchBlock, designed to complete
-// the HTTPFetcher functionality as a standalone fetcher
-func (hf *HTTPFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) {
- fetchedBuf, err := hf.GetBlockBytes(ctx, r)
- if err != nil {
- err = fmt.Errorf("Peer %v: %v", hf.Address(), err)
- return
- }
- block, cert, err := processBlockBytes(fetchedBuf, r, hf.Address())
- if err != nil {
- return
- }
- return block, cert, nil
-}
diff --git a/catchup/peerSelector.go b/catchup/peerSelector.go
index 03b127818..a3b09e00e 100644
--- a/catchup/peerSelector.go
+++ b/catchup/peerSelector.go
@@ -18,6 +18,7 @@ package catchup
import (
"errors"
+ "math"
"sort"
"time"
@@ -38,6 +39,14 @@ const (
peerRank1LowBlockTime = 201
peerRank1HighBlockTime = 399
+ peerRankInitialThirdPriority = 400
+ peerRank2LowBlockTime = 401
+ peerRank2HighBlockTime = 599
+
+ peerRankInitialFourthPriority = 600
+ peerRank3LowBlockTime = 601
+ peerRank3HighBlockTime = 799
+
// peerRankDownloadFailed is used for responses which could be temporary, such as missing files, or such that we don't
// have clear resolution
peerRankDownloadFailed = 900
@@ -49,6 +58,9 @@ const (
// then mapped into the a ranking range.
lowBlockDownloadThreshold = 50 * time.Millisecond
highBlockDownloadThreshold = 8 * time.Second
+
+	// peerHistoryWindowSize is the lookback window size of peer usage statistics
+ peerHistoryWindowSize = 100
)
var errPeerSelectorNoPeerPoolsAvailable = errors.New("no peer pools available")
@@ -71,8 +83,9 @@ type peersRetriever interface {
// peerPoolEntry represents a single peer entry in the pool. It contains
// the underlying network peer as well as the peer class.
type peerPoolEntry struct {
- peer network.Peer
- class peerClass
+ peer network.Peer
+ class peerClass
+ history *historicStats
}
// peerPool is a single pool of peers that shares the same rank.
@@ -90,6 +103,151 @@ type peerSelector struct {
net peersRetriever
peerClasses []peerClass
pools []peerPool
+ counter uint64
+}
+
+// historicStats stores the past windowSize ranks for the peer passed
+// into RankPeer (i.e. no averaging or penalty). The purpose of this
+// structure is to compute the rank based on the performance of the
+// peer in the past, and to be forgiving of occasional performance
+// variations which may not be representative of the peer's overall
+// performance. It also stores the penalty history in the form of peer
+// selection gaps.
+type historicStats struct {
+ windowSize int
+ rankSamples []int
+ rankSum uint64
+ requestGaps []uint64
+ gapSum float64
+ counter uint64
+}
+
+func makeHistoricStatus(windowSize int) *historicStats {
+ // Initialize the window (rankSamples) with zeros.
+ // This way, every peer will slowly build up its profile.
+ // Otherwise, if the best peer gets a bad download the first time,
+ // that will determine the rank of the peer.
+ hs := historicStats{
+ windowSize: windowSize,
+ rankSamples: make([]int, windowSize, windowSize),
+ requestGaps: make([]uint64, 0, windowSize),
+ rankSum: 0,
+ gapSum: 0.0}
+ return &hs
+}
+
+// computerPenalty is the formula (exponential) used to calculate the
+// penalty from the sum of gaps.
+func (hs *historicStats) computerPenalty() float64 {
+ return 1 + (math.Exp(hs.gapSum/10.0) / 1000)
+}
+
+// updateRequestPenalty is given a counter, which is the most recent
+// counter for ranking a peer. It calculates newGap, which is the
+// number of counter ticks since it last was updated (i.e. last ranked
+// after being selected). The number of gaps stored is bounded by the
+// windowSize. Calculates and returns the new penalty.
+func (hs *historicStats) updateRequestPenalty(counter uint64) float64 {
+ newGap := counter - hs.counter
+ hs.counter = counter
+
+ if len(hs.requestGaps) == hs.windowSize {
+ hs.gapSum -= 1.0 / float64(hs.requestGaps[0])
+ hs.requestGaps = hs.requestGaps[1:]
+ }
+
+ hs.requestGaps = append(hs.requestGaps, newGap)
+ hs.gapSum += 1.0 / float64(newGap)
+
+ return hs.computerPenalty()
+}
+
+// resetRequestPenalty removes steps least recent gaps and recomputes the new penalty.
+// Returns the new rank calculated with the new penalty.
+// If steps is 0, it is a full reset, i.e. it drops all gap values.
+func (hs *historicStats) resetRequestPenalty(steps int, initialRank int, class peerClass) (rank int) {
+ if len(hs.requestGaps) == 0 {
+ return initialRank
+ }
+ // resetRequestPenalty cannot move the peer to a better class if the peer was moved
+ // to a lower class (e.g. failed downloads or invalid downloads)
+ if upperBound(class) < initialRank {
+ return initialRank
+ }
+	// if steps is 0, it is a full reset
+ if steps == 0 {
+ hs.requestGaps = make([]uint64, 0, hs.windowSize)
+ hs.gapSum = 0.0
+ return int(float64(hs.rankSum) / float64(len(hs.rankSamples)))
+ }
+
+ if steps > len(hs.requestGaps) {
+ steps = len(hs.requestGaps)
+ }
+ for s := 0; s < steps; s++ {
+ hs.gapSum -= 1.0 / float64(hs.requestGaps[s])
+ }
+ hs.requestGaps = hs.requestGaps[steps:]
+ return int(hs.computerPenalty() * (float64(hs.rankSum) / float64(len(hs.rankSamples))))
+}
+
+// push pushes a new rank to the historicStats, and returns the new
+// rank based on the average of ranks in the windowSize window and the
+// penalty.
+func (hs *historicStats) push(value int, counter uint64, class peerClass) (averagedRank int) {
+
+ // This is the lowest ranking class, and is not subject to giving another chance.
+ // Do not modify this value with historical data.
+ if value == peerRankInvalidDownload {
+ return value
+ }
+
+	// This is a moving window. Remove the least recent value once the window is full
+ if len(hs.rankSamples) == hs.windowSize {
+ hs.rankSum -= uint64(hs.rankSamples[0])
+ hs.rankSamples = hs.rankSamples[1:]
+ }
+
+ initialRank := value
+
+ // Download may fail for various reasons. Give it additional tries
+ // and see if it recovers/improves.
+ if value == peerRankDownloadFailed {
+ // Set the rank to 10 + the class upper bound, to evict
+ // the peer from the class if it is repeatedly
+ // failing. This is to make sure to switch to the next
+ // class when all peers in this class are failing.
+ // Here, +10 is added. This is of little consequence, and the
+ // purpose is to avoid rounding errors.
+ value = upperBound(class) + 10
+ }
+
+ hs.rankSamples = append(hs.rankSamples, value)
+ hs.rankSum += uint64(value)
+
+ // The average performance of the peer
+ average := float64(hs.rankSum) / float64(len(hs.rankSamples))
+
+ if int(average) > upperBound(class) && initialRank == peerRankDownloadFailed {
+ // peerRankDownloadFailed will be delayed, to give the peer
+ // additional time to improve. If does not improve over time,
+ // the average will exceed the class limit. At this point,
+ // it will be pushed down to download failed class.
+ return peerRankDownloadFailed
+ }
+
+	// A penalty is added relative to how frequently the peer is used
+ penalty := hs.updateRequestPenalty(counter)
+
+	// The rank based on the performance and the frequency
+ avgWithPenalty := int(penalty * average)
+
+ // Keep the peer in the same class. The value passed will be
+ // within bounds (unless it is downloadFailed or
+ // invalidDownload), but the penalty may push it over. Prevent
+ // the penalty pushing it off the class bounds.
+ bounded := boundRankByClass(avgWithPenalty, class)
+ return bounded
}
// makePeerSelector creates a peerSelector, given a peersRetriever and peerClass array.
@@ -124,23 +282,29 @@ func (ps *peerSelector) GetNextPeer() (peer network.Peer, err error) {
}
// RankPeer ranks a given peer.
-// return true if the value was updated or false otherwise.
-func (ps *peerSelector) RankPeer(peer network.Peer, rank int) bool {
+// return the old value and the new updated value.
+// updated value could be different from the input rank.
+func (ps *peerSelector) RankPeer(peer network.Peer, rank int) (int, int) {
if peer == nil {
- return false
+ return -1, -1
}
ps.mu.Lock()
defer ps.mu.Unlock()
poolIdx, peerIdx := ps.findPeer(peer)
if poolIdx < 0 || peerIdx < 0 {
- return false
+ return -1, -1
}
+ sortNeeded := false
// we need to remove the peer from the pool so we can place it in a different location.
pool := ps.pools[poolIdx]
+ ps.counter++
+ initialRank := pool.rank
+ rank = pool.peers[peerIdx].history.push(rank, ps.counter, pool.peers[peerIdx].class)
if pool.rank != rank {
class := pool.peers[peerIdx].class
+ peerHistory := pool.peers[peerIdx].history
if len(pool.peers) > 1 {
pool.peers = append(pool.peers[:peerIdx], pool.peers[peerIdx+1:]...)
ps.pools[poolIdx] = pool
@@ -149,13 +313,41 @@ func (ps *peerSelector) RankPeer(peer network.Peer, rank int) bool {
ps.pools = append(ps.pools[:poolIdx], ps.pools[poolIdx+1:]...)
}
- sortNeeded := ps.addToPool(peer, rank, class)
- if sortNeeded {
- ps.sort()
- }
+ sortNeeded = ps.addToPool(peer, rank, class, peerHistory)
}
- return true
+	// Update the ranks of the peers by reducing the penalty for not being selected
+ for pl := len(ps.pools) - 1; pl >= 0; pl-- {
+ pool := ps.pools[pl]
+ for pr := len(pool.peers) - 1; pr >= 0; pr-- {
+ localPeer := pool.peers[pr]
+ if pool.peers[pr].peer == peer {
+ continue
+ }
+ // make the removal of penalty at a faster rate than adding it, so that the
+	// performance of the peer dominates in the evaluation over the frequency.
+ // Otherwise, the peer selection will oscillate between the good performing and
+ // a bad performing peers when sufficient penalty is accumulated to the good peer.
+ newRank := localPeer.history.resetRequestPenalty(5, pool.rank, pool.peers[pr].class)
+ if newRank != pool.rank {
+ upeer := pool.peers[pr].peer
+ class := pool.peers[pr].class
+ peerHistory := pool.peers[pr].history
+ if len(pool.peers) > 1 {
+ pool.peers = append(pool.peers[:pr], pool.peers[pr+1:]...)
+ ps.pools[pl] = pool
+ } else {
+ // the last peer was removed from the pool; delete this pool.
+ ps.pools = append(ps.pools[:pl], ps.pools[pl+1:]...)
+ }
+ sortNeeded = ps.addToPool(upeer, newRank, class, peerHistory) || sortNeeded
+ }
+ }
+ }
+ if sortNeeded {
+ ps.sort()
+ }
+ return initialRank, rank
}
// PeerDownloadDurationToRank calculates the rank for a peer given a peer and the block download time.
@@ -170,24 +362,28 @@ func (ps *peerSelector) PeerDownloadDurationToRank(peer network.Peer, blockDownl
switch ps.pools[poolIdx].peers[peerIdx].class.initialRank {
case peerRankInitialFirstPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank0LowBlockTime, peerRank0HighBlockTime)
- default: // i.e. peerRankInitialSecondPriority
+ case peerRankInitialSecondPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank1LowBlockTime, peerRank1HighBlockTime)
+ case peerRankInitialThirdPriority:
+ return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime)
+ default: // i.e. peerRankInitialFourthPriority
+ return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime)
}
}
// addToPool adds a given peer to the correct group. If no group exists for that peer's rank,
// a new group is created.
// The method return true if a new group was created ( suggesting that the pools list would need to be re-ordered ), or false otherwise.
-func (ps *peerSelector) addToPool(peer network.Peer, rank int, class peerClass) bool {
+func (ps *peerSelector) addToPool(peer network.Peer, rank int, class peerClass, peerHistory *historicStats) bool {
// see if we already have a list with that rank:
for i, pool := range ps.pools {
if pool.rank == rank {
// we found an existing group, add this peer to the list.
- ps.pools[i].peers = append(pool.peers, peerPoolEntry{peer: peer, class: class})
+ ps.pools[i].peers = append(pool.peers, peerPoolEntry{peer: peer, class: class, history: peerHistory})
return false
}
}
- ps.pools = append(ps.pools, peerPool{rank: rank, peers: []peerPoolEntry{{peer: peer, class: class}}})
+ ps.pools = append(ps.pools, peerPool{rank: rank, peers: []peerPoolEntry{{peer: peer, class: class, history: peerHistory}}})
return true
}
@@ -233,8 +429,8 @@ func (ps *peerSelector) refreshAvailablePeers() {
delete(existingPeers, peerAddress)
continue
}
- // it's an entry which we did not had before.
- sortNeeded = ps.addToPool(peer, initClass.initialRank, initClass) || sortNeeded
+ // it's an entry which we did not have before.
+ sortNeeded = ps.addToPool(peer, initClass.initialRank, initClass, makeHistoricStatus(peerHistoryWindowSize)) || sortNeeded
}
}
@@ -291,3 +487,39 @@ func downloadDurationToRank(downloadDuration, minDownloadDuration, maxDownloadDu
rank = minRank + int((downloadDuration-minDownloadDuration).Nanoseconds()*int64(maxRank-minRank)/(maxDownloadDuration-minDownloadDuration).Nanoseconds())
return
}
+
+func lowerBound(class peerClass) int {
+ switch class.initialRank {
+ case peerRankInitialFirstPriority:
+ return peerRank0LowBlockTime
+ case peerRankInitialSecondPriority:
+ return peerRank1LowBlockTime
+ case peerRankInitialThirdPriority:
+ return peerRank2LowBlockTime
+ default: // i.e. peerRankInitialFourthPriority
+ return peerRank3LowBlockTime
+ }
+}
+
+func upperBound(class peerClass) int {
+ switch class.initialRank {
+ case peerRankInitialFirstPriority:
+ return peerRank0HighBlockTime
+ case peerRankInitialSecondPriority:
+ return peerRank1HighBlockTime
+ case peerRankInitialThirdPriority:
+ return peerRank2HighBlockTime
+ default: // i.e. peerRankInitialFourthPriority
+ return peerRank3HighBlockTime
+ }
+}
+
+func boundRankByClass(rank int, class peerClass) int {
+ if rank < lowerBound(class) {
+ return lowerBound(class)
+ }
+ if rank > upperBound(class) {
+ return upperBound(class)
+ }
+ return rank
+}
diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go
index 543223419..b8f1e57c3 100644
--- a/catchup/peerSelector_test.go
+++ b/catchup/peerSelector_test.go
@@ -18,12 +18,14 @@ package catchup
import (
"context"
+ "fmt"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
)
@@ -129,13 +131,15 @@ func TestPeerSelector(t *testing.T) {
// add another peer
peers = []network.Peer{&mockHTTPPeer{address: "54321"}, &mockHTTPPeer{address: "abcde"}}
- require.True(t, peerSelector.RankPeer(peer, 5))
+ r1, r2 := peerSelector.RankPeer(peer, 5)
+ require.True(t, r1 != r2)
peer, err = peerSelector.GetNextPeer()
require.NoError(t, err)
require.Equal(t, "abcde", peerAddress(peer))
- require.True(t, peerSelector.RankPeer(peer, 10))
+ r1, r2 = peerSelector.RankPeer(peer, 10)
+ require.True(t, r1 != r2)
peer, err = peerSelector.GetNextPeer()
require.NoError(t, err)
@@ -152,8 +156,10 @@ func TestPeerSelector(t *testing.T) {
require.Equal(t, errPeerSelectorNoPeerPoolsAvailable, err)
require.Nil(t, peer)
- require.False(t, peerSelector.RankPeer(nil, 10))
- require.False(t, peerSelector.RankPeer(&mockHTTPPeer{address: "abc123"}, 10))
+ r1, r2 = peerSelector.RankPeer(nil, 10)
+ require.False(t, r1 != r2 )
+ r2, r2 = peerSelector.RankPeer(&mockHTTPPeer{address: "abc123"}, 10)
+ require.False(t, r1 != r2)
return
}
@@ -218,3 +224,209 @@ func TestFindMissingPeer(t *testing.T) {
require.Equal(t, -1, poolIdx)
require.Equal(t, -1, peerIdx)
}
+
+// TestHistoricData verifies that peers with consistently faster simulated
+// downloads ("a3" fastest, "a1" slowest) are selected more often over 1000
+// iterations, and that second-priority relay peers ("b1"/"b2") are never
+// selected while the first-priority archiver pool is available.
+func TestHistoricData(t *testing.T) {
+
+	peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
+	peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
+
+	peerSelector := makePeerSelector(
+		makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
+			for _, opt := range options {
+				if opt == network.PeersPhonebookArchivers {
+					peers = append(peers, peers1...)
+				} else {
+					peers = append(peers, peers2...)
+				}
+			}
+			return
+		}), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+			{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
+	)
+
+	var counters [5]int
+	for i := 0; i < 1000; i++ {
+		peer, getPeerErr := peerSelector.GetNextPeer()
+		// Check the error before touching peer: the type assertion below
+		// would panic on a nil peer instead of failing the test cleanly.
+		require.NoError(t, getPeerErr)
+
+		switch peer.(*mockHTTPPeer).address {
+		case "a1":
+			counters[0]++
+		case "a2":
+			counters[1]++
+		case "a3":
+			counters[2]++
+		case "b1":
+			counters[3]++
+		case "b2":
+			counters[4]++
+		}
+
+		// Simulate a download that succeeds ~98% of the time, with each peer's
+		// characteristic latency scaled by a random factor in [1, 2).
+		randVal := float64(crypto.RandUint64()%uint64(100)) / 100
+		randVal = randVal + 1
+		if randVal < 1.98 {
+			var duration time.Duration
+			switch peer.(*mockHTTPPeer).address {
+			case "a1":
+				duration = time.Duration(1500 * float64(time.Millisecond) * randVal)
+			case "a2":
+				duration = time.Duration(500 * float64(time.Millisecond) * randVal)
+			case "a3":
+				duration = time.Duration(100 * float64(time.Millisecond) * randVal)
+			}
+			peerRank := peerSelector.PeerDownloadDurationToRank(peer, duration)
+			peerSelector.RankPeer(peer, peerRank)
+		} else {
+			peerSelector.RankPeer(peer, peerRankDownloadFailed)
+		}
+	}
+
+	fmt.Printf("a1: %d\n", counters[0])
+	fmt.Printf("a2: %d\n", counters[1])
+	fmt.Printf("a3: %d\n", counters[2])
+	fmt.Printf("b1: %d\n", counters[3])
+	fmt.Printf("b2: %d\n", counters[4])
+	// The fastest peer should dominate; relays must never have been picked.
+	require.GreaterOrEqual(t, counters[2], counters[1])
+	require.GreaterOrEqual(t, counters[2], counters[0])
+	// testify convention: expected value first, actual second.
+	require.Equal(t, 0, counters[3])
+	require.Equal(t, 0, counters[4])
+}
+
+// TestPeersDownloadFailed checks that once the archiver peers ("a1"-"a3")
+// start failing every download (after iteration 500), the selector falls back
+// to the second-priority relay peers ("b1"/"b2"), and that the failing
+// archivers end up together in a pool ranked peerRankDownloadFailed (900).
+func TestPeersDownloadFailed(t *testing.T) {
+
+	peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
+	peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
+
+	peerSelector := makePeerSelector(
+		makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
+			for _, opt := range options {
+				if opt == network.PeersPhonebookArchivers {
+					peers = append(peers, peers1...)
+				} else {
+					peers = append(peers, peers2...)
+				}
+			}
+			return
+		}), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+			{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
+	)
+
+	var counters [5]int
+	for i := 0; i < 1000; i++ {
+		peer, getPeerErr := peerSelector.GetNextPeer()
+		// Check the error before touching peer: the type assertion below
+		// would panic on a nil peer instead of failing the test cleanly.
+		require.NoError(t, getPeerErr)
+
+		switch peer.(*mockHTTPPeer).address {
+		case "a1":
+			counters[0]++
+		case "a2":
+			counters[1]++
+		case "a3":
+			counters[2]++
+		case "b1":
+			counters[3]++
+		case "b2":
+			counters[4]++
+		}
+
+		// For the first 500 iterations (and always for the relay peers),
+		// simulate mostly-successful downloads; afterwards, fail every
+		// download from the archiver peers.
+		if i < 500 || peerAddress(peer) == "b1" || peerAddress(peer) == "b2" {
+			randVal := float64(crypto.RandUint64()%uint64(100)) / 100
+			randVal = randVal + 1
+			if randVal < 1.98 {
+				duration := time.Duration(100 * float64(time.Millisecond) * randVal)
+				peerRank := peerSelector.PeerDownloadDurationToRank(peer, duration)
+				peerSelector.RankPeer(peer, peerRank)
+			} else {
+				peerSelector.RankPeer(peer, peerRankDownloadFailed)
+			}
+		} else {
+			peerSelector.RankPeer(peer, peerRankDownloadFailed)
+		}
+	}
+
+	fmt.Printf("a1: %d\n", counters[0])
+	fmt.Printf("a2: %d\n", counters[1])
+	fmt.Printf("a3: %d\n", counters[2])
+	fmt.Printf("b1: %d\n", counters[3])
+	fmt.Printf("b2: %d\n", counters[4])
+	require.GreaterOrEqual(t, counters[3], 20)
+	require.GreaterOrEqual(t, counters[4], 20)
+
+	// The best-ranked pool should now hold the relay peers; the three failed
+	// archivers should share a pool of rank 900.
+	b1orb2 := peerAddress(peerSelector.pools[0].peers[0].peer) == "b1" || peerAddress(peerSelector.pools[0].peers[0].peer) == "b2"
+	require.True(t, b1orb2)
+	if len(peerSelector.pools) == 2 {
+		b1orb2 := peerAddress(peerSelector.pools[0].peers[1].peer) == "b1" || peerAddress(peerSelector.pools[0].peers[1].peer) == "b2"
+		require.True(t, b1orb2)
+		// testify convention: expected value first, actual second.
+		require.Equal(t, 900, peerSelector.pools[1].rank)
+		require.Equal(t, 3, len(peerSelector.pools[1].peers))
+	} else {
+		b1orb2 := peerAddress(peerSelector.pools[1].peers[0].peer) == "b1" || peerAddress(peerSelector.pools[1].peers[0].peer) == "b2"
+		require.True(t, b1orb2)
+		require.Equal(t, 900, peerSelector.pools[2].rank)
+		require.Equal(t, 3, len(peerSelector.pools[2].peers))
+	}
+
+}
+
+// TestPenalty tests that the penalty is calculated correctly and one peer
+// is not dominating all the selection. Downloads always succeed here with
+// fixed per-peer latencies, so the distribution is driven purely by rank
+// and the repeated-selection penalty.
+func TestPenalty(t *testing.T) {
+
+	peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
+	peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
+
+	peerSelector := makePeerSelector(
+		makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
+			for _, opt := range options {
+				if opt == network.PeersPhonebookArchivers {
+					peers = append(peers, peers1...)
+				} else {
+					peers = append(peers, peers2...)
+				}
+			}
+			return
+		}), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+			{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
+	)
+
+	var counters [5]int
+	for i := 0; i < 1000; i++ {
+		peer, getPeerErr := peerSelector.GetNextPeer()
+		// Check the error before touching peer: the type assertion below
+		// would panic on a nil peer instead of failing the test cleanly.
+		require.NoError(t, getPeerErr)
+
+		switch peer.(*mockHTTPPeer).address {
+		case "a1":
+			counters[0]++
+		case "a2":
+			counters[1]++
+		case "a3":
+			counters[2]++
+		case "b1":
+			counters[3]++
+		case "b2":
+			counters[4]++
+		}
+
+		// Fixed (deterministic) download durations per archiver peer.
+		var duration time.Duration
+		switch peer.(*mockHTTPPeer).address {
+		case "a1":
+			duration = time.Duration(1500 * float64(time.Millisecond))
+		case "a2":
+			duration = time.Duration(500 * float64(time.Millisecond))
+		case "a3":
+			duration = time.Duration(100 * float64(time.Millisecond))
+		}
+		peerRank := peerSelector.PeerDownloadDurationToRank(peer, duration)
+		peerSelector.RankPeer(peer, peerRank)
+	}
+
+	fmt.Printf("a1: %d\n", counters[0])
+	fmt.Printf("a2: %d\n", counters[1])
+	fmt.Printf("a3: %d\n", counters[2])
+	fmt.Printf("b1: %d\n", counters[3])
+	fmt.Printf("b2: %d\n", counters[4])
+	// The slower peers still get a meaningful share (penalty prevents
+	// domination), but the fastest peer remains well ahead.
+	require.GreaterOrEqual(t, counters[1], 50)
+	require.GreaterOrEqual(t, counters[2], 2*counters[1])
+	// testify convention: expected value first, actual second.
+	require.Equal(t, 0, counters[3])
+	require.Equal(t, 0, counters[4])
+}
diff --git a/catchup/pref_test.go b/catchup/pref_test.go
index 89ce25118..2bbb32d10 100644
--- a/catchup/pref_test.go
+++ b/catchup/pref_test.go
@@ -24,7 +24,6 @@ import (
"github.com/stretchr/testify/require"
- "github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
@@ -33,6 +32,7 @@ import (
"github.com/algorand/go-algorand/data/datatest"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/util/db"
)
@@ -45,25 +45,38 @@ func BenchmarkServiceFetchBlocks(b *testing.B) {
require.NotNil(b, remote)
require.NotNil(b, local)
- net := &mocks.MockNetwork{}
+ // Create a network and block service
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.TestingLog(b), config.GetDefaultLocal(), remote, net, "test genesisID")
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
cfg := config.GetDefaultLocal()
cfg.Archival = true
for i := 0; i < b.N; i++ {
inMem := true
- local, err := data.LoadLedger(logging.Base(), b.Name()+"empty"+strconv.Itoa(i), inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg)
+ local, err := data.LoadLedger(logging.TestingLog(b), b.Name()+"empty"+strconv.Itoa(i), inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg)
require.NoError(b, err)
// Make Service
- syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil)
- syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true})
-
+ syncer := MakeService(logging.TestingLog(b), defaultConfig, net, local, new(mockedAuthenticator), nil, nil)
b.StartTimer()
- syncer.sync()
+ syncer.Start()
+ for w := 0; w < 1000; w++ {
+ if remote.LastRound() == local.LastRound() {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
b.StopTimer()
- local.Close()
+ syncer.Stop()
require.Equal(b, remote.LastRound(), local.LastRound())
+ local.Close()
}
}
@@ -115,8 +128,9 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da
}
short := root.Address()
- parts[i] = part
+ parts[i] = part.Participation
genesis[short] = startamt
+ part.Close()
}
genesis[basics.Address(sinkAddr)] = basics.AccountData{
@@ -133,10 +147,10 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
- emptyLedger, err = data.LoadLedger(logging.Base(), t.Name()+"empty", inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg)
+ emptyLedger, err = data.LoadLedger(logging.TestingLog(t), t.Name()+"empty", inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg)
require.NoError(t, err)
- ledger, err = datatest.FabricateLedger(logging.Base(), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks))
+ ledger, err = datatest.FabricateLedger(logging.TestingLog(t), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks))
require.NoError(t, err)
require.Equal(t, ledger.LastRound(), emptyLedger.LastRound()+basics.Round(numBlocks))
return ledger, emptyLedger, release, genesisBalances
diff --git a/catchup/service.go b/catchup/service.go
index dd83f05b2..21a0d3d06 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -28,11 +28,13 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/execpool"
)
const catchupPeersForSync = 10
@@ -56,22 +58,24 @@ type Ledger interface {
LastRound() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
IsWritingCatchpointFile() bool
+ Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error)
+ AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error
}
// Service represents the catchup service. Once started and until it is stopped, it ensures that the ledger is up to date with network.
type Service struct {
- syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops
- cfg config.Local
- ledger Ledger
- fetcherFactory FetcherFactory
- ctx context.Context
- cancel func()
- done chan struct{}
- log logging.Logger
- net network.GossipNode
- auth BlockAuthenticator
- parallelBlocks uint64
- deadlineTimeout time.Duration
+ syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops
+ cfg config.Local
+ ledger Ledger
+ ctx context.Context
+ cancel func()
+ done chan struct{}
+ log logging.Logger
+ net network.GossipNode
+ auth BlockAuthenticator
+ parallelBlocks uint64
+ deadlineTimeout time.Duration
+ blockValidationPool execpool.BacklogPool
// suspendForCatchpointWriting defines whether we've ran into a state where the ledger is currently busy writing the
// catchpoint file. If so, we want to suspend the catchup process until the catchpoint file writing is complete,
@@ -85,8 +89,6 @@ type Service struct {
protocolErrorLogged bool
lastSupportedRound basics.Round
unmatchedPendingCertificates <-chan PendingUnmatchedCertificate
-
- latestRoundFetcherFactory FetcherFactory
}
// A BlockAuthenticator authenticates blocks given a certificate.
@@ -102,19 +104,19 @@ type BlockAuthenticator interface {
}
// MakeService creates a catchup service instance from its constituent components
-func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate) (s *Service) {
+func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate, blockValidationPool execpool.BacklogPool) (s *Service) {
s = &Service{}
s.cfg = config
- s.fetcherFactory = MakeNetworkFetcherFactory(net, catchupPeersForSync, &config)
s.ledger = ledger
s.net = net
s.auth = auth
s.unmatchedPendingCertificates = unmatchedPendingCertificates
- s.latestRoundFetcherFactory = MakeNetworkFetcherFactory(net, blockQueryPeerLimit, &config)
s.log = log.With("Context", "sync")
s.parallelBlocks = config.CatchupParallelBlocks
s.deadlineTimeout = agreement.DeadlineTimeout()
+ s.blockValidationPool = blockValidationPool
+
return s
}
@@ -155,8 +157,9 @@ func (s *Service) SynchronizingTime() time.Duration {
}
// function scope to make a bunch of defer statements better
-func (s *Service) innerFetch(fetcher Fetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) {
+func (s *Service) innerFetch(r basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) {
ctx, cf := context.WithCancel(s.ctx)
+ fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg)
defer cf()
stopWaitingForLedgerRound := make(chan struct{})
defer close(stopWaitingForLedgerRound)
@@ -167,15 +170,15 @@ func (s *Service) innerFetch(fetcher Fetcher, r basics.Round) (blk *bookkeeping.
cf()
}
}()
- return fetcher.FetchBlock(ctx, r)
+ return fetcher.fetchBlock(ctx, r, peer)
}
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
// Returns false if we couldn't fetch or write (i.e., if we failed even after a given number of retries or if we were told to abort.)
-func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool {
+func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool, peerSelector *peerSelector) bool {
i := 0
hasLookback := false
- for !fetcher.OutOfPeers(r) {
+ for { // idiomatic infinite loop; `for true` is flagged by staticcheck (S1006)
i++
select {
case <-s.ctx.Done():
@@ -186,16 +189,37 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple
// Stop retrying after a while.
if i > catchupRetryLimit {
- s.log.Errorf("fetchAndWrite: block retrieval exceeded retry limit")
+ loggedMessage := fmt.Sprintf("fetchAndWrite(%d): block retrieval exceeded retry limit", r)
+ if _, initialSync := s.IsSynchronizing(); initialSync {
+ // on the initial sync, it's completely expected that we won't be able to get all the "next" blocks.
+ // Therefore info should suffice.
+ s.log.Info(loggedMessage)
+ } else {
+ // On any subsequent sync, we might be looking for multiple rounds into the future, so it's completely
+ // reasonable that we would fail retrieving the future block.
+ // Generate a warning here only if we're failing to retrieve X+1 or below.
+ // All other block retrievals should not generate a warning.
+ if r > s.ledger.NextRound() {
+ s.log.Info(loggedMessage)
+ } else {
+ s.log.Warn(loggedMessage)
+ }
+ }
return false
}
- // Try to fetch, timing out after retryInterval
+ peer, getPeerErr := peerSelector.GetNextPeer()
+ if getPeerErr != nil {
+ s.log.Debugf("fetchAndWrite: was unable to obtain a peer to retrieve the block from")
+ break
+ }
- block, cert, client, err := s.innerFetch(fetcher, r)
+ // Try to fetch, timing out after retryInterval
+ block, cert, blockDownloadDuration, err := s.innerFetch(r, peer)
if err != nil {
s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i)
+ peerSelector.RankPeer(peer, peerRankDownloadFailed)
// we've just failed to retrieve a block; wait until the previous block is fetched before trying again
// to avoid the usecase where the first block doesn't exists and we're making many requests down the chain
// for no reason.
@@ -219,17 +243,18 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple
s.log.Debugf("fetchAndWrite(%v): Got block and cert contents: %v %v", r, block, cert)
// Check that the block's contents match the block header (necessary with an untrusted block because b.Hash() only hashes the header)
- if !block.ContentsMatchHeader() {
- // Check if this mismatch is due to an unsupported protocol version
- if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok {
- s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol)
- client.Close()
- return false
- }
+ if s.cfg.CatchupVerifyPaysetHash() {
+ if !block.ContentsMatchHeader() {
+ peerSelector.RankPeer(peer, peerRankInvalidDownload)
+ // Check if this mismatch is due to an unsupported protocol version
+ if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok {
+ s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol)
+ return false
+ }
- s.log.Warnf("fetchAndWrite(%v): block contents do not match header (attempt %d)", r, i)
- client.Close()
- continue // retry the fetch
+ s.log.Warnf("fetchAndWrite(%v): block contents do not match header (attempt %d)", r, i)
+ continue // retry the fetch
+ }
}
// make sure that we have the lookBack block that's required for authenticating this block
@@ -245,14 +270,19 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple
}
}
}
-
- err = s.auth.Authenticate(block, cert)
- if err != nil {
- s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err)
- client.Close()
- continue // retry the fetch
+ if s.cfg.CatchupVerifyCertificate() {
+ err = s.auth.Authenticate(block, cert)
+ if err != nil {
+ s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err)
+ peerSelector.RankPeer(peer, peerRankInvalidDownload)
+ continue // retry the fetch
+ }
}
+ peerRank := peerSelector.PeerDownloadDurationToRank(peer, blockDownloadDuration)
+ r1, r2 := peerSelector.RankPeer(peer, peerRank)
+ s.log.Debugf("fetchAndWrite(%d): ranked peer with %d from %d to %d", r, peerRank, r1, r2)
+
// Write to ledger, noting that ledger writes must be in order
select {
case <-s.ctx.Done():
@@ -275,7 +305,21 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple
return false
}
- err = s.ledger.AddBlock(*block, *cert)
+ if s.cfg.CatchupVerifyTransactionSignatures() || s.cfg.CatchupVerifyApplyData() {
+ vb, err := s.ledger.Validate(s.ctx, *block, s.blockValidationPool)
+ if err != nil {
+ if s.ctx.Err() != nil {
+ // if the context expired, just exit.
+ return false
+ }
+ s.log.Warnf("fetchAndWrite(%d): failed to validate block : %v", r, err)
+ return false
+ }
+ err = s.ledger.AddValidatedBlock(*vb, *cert)
+ } else {
+ err = s.ledger.AddBlock(*block, *cert)
+ }
+
if err != nil {
switch err.(type) {
case ledgercore.BlockInLedgerError:
@@ -304,9 +348,9 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple
type task func() basics.Round
-func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round {
+func (s *Service) pipelineCallback(r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool, peerSelector *peerSelector) func() basics.Round {
return func() basics.Round {
- fetchResult := s.fetchAndWrite(fetcher, r, prevFetchCompleteChan, lookbackChan)
+ fetchResult := s.fetchAndWrite(r, prevFetchCompleteChan, lookbackChan, peerSelector)
// the fetch result will be read at most twice (once as the lookback block and once as the prev block, so we write the result twice)
thisFetchComplete <- fetchResult
@@ -322,14 +366,6 @@ func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchCom
// TODO the following code does not handle the following case: seedLookback upgrades during fetch
func (s *Service) pipelinedFetch(seedLookback uint64) {
- fetcher := s.fetcherFactory.NewOverGossip()
- defer fetcher.Close()
-
- // make sure that we have at least one peer
- if fetcher.NumPeers() == 0 {
- return
- }
-
parallelRequests := s.parallelBlocks
if parallelRequests < seedLookback {
parallelRequests = seedLookback
@@ -345,6 +381,13 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
close(completed)
}()
+ peerSelector := s.createPeerSelector(true)
+
+ if _, err := peerSelector.GetNextPeer(); err == errPeerSelectorNoPeerPoolsAvailable {
+ s.log.Debugf("pipelinedFetch: was unable to obtain a peer to retrieve the block from")
+ return
+ }
+
// Invariant: len(taskCh) + (# pending writes to completed) <= N
wg.Add(int(parallelRequests))
for i := uint64(0); i < parallelRequests; i++ {
@@ -391,7 +434,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
currentRoundComplete := make(chan bool, 2)
// len(taskCh) + (# pending writes to completed) increases by 1
- taskCh <- s.pipelineCallback(fetcher, nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[len(recentReqs)-int(seedLookback)])
+ taskCh <- s.pipelineCallback(nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[len(recentReqs)-int(seedLookback)], peerSelector)
recentReqs = append(recentReqs[1:], currentRoundComplete)
}
@@ -423,7 +466,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
currentRoundComplete := make(chan bool, 2)
// len(taskCh) + (# pending writes to completed) increases by 1
- taskCh <- s.pipelineCallback(fetcher, nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[0])
+ taskCh <- s.pipelineCallback(nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[0], peerSelector)
recentReqs = append(recentReqs[1:], currentRoundComplete)
nextRound++
}
@@ -436,15 +479,8 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
// periodicSync periodically asks the network for its latest round and syncs if we've fallen behind (also if our ledger stops advancing)
func (s *Service) periodicSync() {
defer close(s.done)
- // wait until network is ready, or until we're told to quit
- select {
- case <-s.net.Ready():
- s.log.Info("network ready")
- case <-s.ctx.Done():
- return
- }
// if the catchup is disabled in the config file, just skip it.
- if s.parallelBlocks != 0 {
+ if s.parallelBlocks != 0 && !s.cfg.DisableNetworking {
s.sync()
}
stuckInARow := 0
@@ -462,7 +498,7 @@ func (s *Service) periodicSync() {
sleepDuration = time.Duration(crypto.RandUint63()) % s.deadlineTimeout
continue
case <-time.After(sleepDuration):
- if sleepDuration < s.deadlineTimeout {
+ if sleepDuration < s.deadlineTimeout || s.cfg.DisableNetworking {
sleepDuration = s.deadlineTimeout
continue
}
@@ -480,6 +516,10 @@ func (s *Service) periodicSync() {
s.sync()
case cert := <-s.unmatchedPendingCertificates:
// the agreement service has a valid certificate for a block, but not the block itself.
+ if s.cfg.DisableNetworking {
+ s.log.Warnf("the local node is missing block %d, however, the catchup would not be able to provide it when the network is disabled.", cert.Cert.Round)
+ continue
+ }
s.syncCert(&cert)
}
@@ -557,20 +597,17 @@ func (s *Service) syncCert(cert *PendingUnmatchedCertificate) {
// TODO this doesn't actually use the digest from cert!
func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest
- fetcher := s.latestRoundFetcherFactory.NewOverGossip()
- defer func() {
- fetcher.Close()
- }()
+ peerSelector := s.createPeerSelector(false)
for s.ledger.LastRound() < cert.Round {
- if fetcher.OutOfPeers(cert.Round) {
- fetcher.Close()
- // refresh peers and try again
- logging.Base().Warn("fetchRound found no outgoing peers")
+ peer, getPeerErr := peerSelector.GetNextPeer()
+ if getPeerErr != nil {
+ s.log.Debugf("fetchRound: was unable to obtain a peer to retrieve the block from")
s.net.RequestConnectOutgoing(true, s.ctx.Done())
- fetcher = s.latestRoundFetcherFactory.NewOverGossip()
+ continue
}
+
// Ask the fetcher to get the block somehow
- block, fetchedCert, rpcc, err := s.innerFetch(fetcher, cert.Round)
+ block, fetchedCert, _, err := s.innerFetch(cert.Round, peer)
if err != nil {
select {
@@ -580,9 +617,9 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
default:
}
logging.Base().Warnf("fetchRound could not acquire block, fetcher errored out: %v", err)
+ peerSelector.RankPeer(peer, peerRankDownloadFailed)
continue
}
- rpcc.Close()
if block.Hash() == blockHash && block.ContentsMatchHeader() {
s.ledger.EnsureBlock(block, cert)
@@ -590,6 +627,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
}
// Otherwise, fetcher gave us the wrong block
logging.Base().Warnf("fetcher gave us bad/wrong block (for round %d): fetched hash %v; want hash %v", cert.Round, block.Hash(), blockHash)
+ peerSelector.RankPeer(peer, peerRankInvalidDownload)
// As a failsafe, if the cert we fetched is valid but for the wrong block, panic as loudly as possible
if cert.Round == fetchedCert.Round &&
@@ -659,3 +697,69 @@ func (s *Service) handleUnsupportedRound(nextUnsupportedRound basics.Round) {
s.cancel()
}
}
+
+// createPeerSelector builds a peerSelector whose peer-class priority ordering
+// depends on three factors: whether catchup from archive servers is enabled
+// (s.cfg.EnableCatchupFromArchiveServers), whether this selector serves the
+// pipelined bulk fetch (pipelineFetch) or a single-round fetch, and whether
+// the node is a relay (s.cfg.NetAddress != ""). Note the key difference:
+// with archivers enabled, a pipelined fetch on a non-relay node puts
+// PeersPhonebookArchivers first; in every other configuration the
+// already-connected outgoing peers come first.
+func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
+	var peerClasses []peerClass
+	if s.cfg.EnableCatchupFromArchiveServers {
+		if pipelineFetch {
+			if s.cfg.NetAddress != "" { // Relay node
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers},
+					{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+					{initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
+				}
+			} else {
+				// Non-relay pipelined fetch: the only configuration where
+				// archivers take first priority.
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+				}
+			}
+		} else {
+			if s.cfg.NetAddress != "" { // Relay node
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
+					{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+					{initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers},
+				}
+			} else {
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+					{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers},
+				}
+			}
+		}
+	} else {
+		// Archive-server catchup disabled: archivers are excluded entirely.
+		if pipelineFetch {
+			if s.cfg.NetAddress != "" { // Relay node
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+					{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedIn},
+				}
+			} else {
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+				}
+			}
+		} else {
+			if s.cfg.NetAddress != "" { // Relay node
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
+					{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+				}
+			} else {
+				peerClasses = []peerClass{
+					{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+					{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+				}
+			}
+		}
+	}
+	return makePeerSelector(s.net, peerClasses)
+}
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 417801276..32e22c02c 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -28,78 +28,38 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/util/execpool"
)
var defaultConfig = config.GetDefaultLocal()
var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
-type MockedFetcherFactory struct {
- fetcher *MockedFetcher
- mu deadlock.Mutex
-}
-
-// a lock just to sync swapping internal fetchers
-func makeMockFactory(fetcher *MockedFetcher) *MockedFetcherFactory {
- var factory MockedFetcherFactory
- factory.fetcher = fetcher
- return &factory
-}
-
-func (factory *MockedFetcherFactory) New() Fetcher {
- factory.mu.Lock()
- defer factory.mu.Unlock()
- return factory.fetcher
-}
-
-func (factory *MockedFetcherFactory) NewOverGossip() Fetcher {
- return factory.New()
-}
-
-func (factory *MockedFetcherFactory) changeFetcher(fetcher *MockedFetcher) {
- factory.mu.Lock()
- defer factory.mu.Unlock()
- factory.fetcher = fetcher
-}
-
-type MockClient struct {
- once sync.Once
- closed bool
-}
-
-func (*MockClient) Address() string {
- return "mock.address."
-}
-func (c *MockClient) Close() error {
- c.once.Do(func() {
- c.closed = true
- })
- return nil
-}
-func (c *MockClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- return nil, nil
-}
-
-// Mocked Fetcher
+// Mocked Fetcher will mock UniversalFetcher
type MockedFetcher struct {
ledger Ledger
timeout bool
tries map[basics.Round]int
- client MockClient
latency time.Duration
predictable bool
mu deadlock.Mutex
}
-func (m *MockedFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error) {
+func (m *MockedFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (*bookkeeping.Block, *agreement.Certificate, time.Duration, error) {
+ if m.OutOfPeers(round) {
+ return nil, nil, time.Duration(0), nil
+ }
if m.timeout {
time.Sleep(time.Duration(config.GetDefaultLocal().CatchupHTTPBlockFetchTimeoutSec)*time.Second + time.Second)
}
@@ -111,14 +71,14 @@ func (m *MockedFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bo
}
block, err := m.ledger.Block(round)
if round > m.ledger.LastRound() {
- return nil, nil, nil, errors.New("no block")
+ return nil, nil, time.Duration(0), errors.New("no block")
} else if err != nil {
panic(err)
}
var cert agreement.Certificate
cert.Proposal.BlockDigest = block.Digest()
- return &block, &cert, &m.client, nil
+ return &block, &cert, time.Duration(0), nil
}
func (m *MockedFetcher) NumPeers() int {
@@ -168,51 +128,81 @@ func (auth *mockedAuthenticator) alter(errorRound int, fail bool) {
}
func TestServiceFetchBlocksSameRange(t *testing.T) {
- // Make Ledger
- remote, local := testingenv(t, 10)
+ // Make Ledgers
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
+
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, 10)
- require.NotNil(t, remote)
- require.NotNil(t, local)
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
- net := &mocks.MockNetwork{}
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
// Make Service
- syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil)
- syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)})
+ syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil)
syncer.testStart()
syncer.sync()
-
- require.Equal(t, remote.LastRound(), local.LastRound())
+ rr, lr := remote.LastRound(), local.LastRound()
+ require.Equal(t, rr, lr)
}
func TestPeriodicSync(t *testing.T) {
// Make Ledger
- remote, local := testingenv(t, 10)
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
+
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, 10)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
auth := &mockedAuthenticator{fail: true}
initialLocalRound := local.LastRound()
require.True(t, 0 == initialLocalRound)
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, auth, nil)
+ s := MakeService(logging.Base(), defaultConfig, net, local, auth, nil, nil)
s.deadlineTimeout = 2 * time.Second
- factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
- s.fetcherFactory = &factory
- require.True(t, initialLocalRound < remote.LastRound())
-
s.Start()
defer s.Stop()
time.Sleep(s.deadlineTimeout*2 - 200*time.Millisecond)
require.Equal(t, initialLocalRound, local.LastRound())
auth.alter(-1, false)
- s.fetcherFactory.(*MockedFetcherFactory).changeFetcher(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)})
time.Sleep(2 * time.Second)
// Asserts that the last block is the one we expect
- require.Equal(t, remote.LastRound(), local.LastRound())
- for r := basics.Round(0); r < remote.LastRound(); r++ {
+ rr, lr := remote.LastRound(), local.LastRound()
+ require.Equal(t, rr, lr)
+ for r := basics.Round(1); r < remote.LastRound(); r++ {
localBlock, err := local.Block(r)
require.NoError(t, err)
remoteBlock, err := remote.Block(r)
@@ -224,18 +214,33 @@ func TestPeriodicSync(t *testing.T) {
func TestServiceFetchBlocksOneBlock(t *testing.T) {
// Make Ledger
numBlocks := 10
- remote, local := testingenv(t, numBlocks)
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
lastRoundAtStart := local.LastRound()
- net := &mocks.MockNetwork{}
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, numBlocks-1)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
// Make Service
- s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil)
- factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
- s.fetcherFactory = &factory
+ s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil)
// Get last round
- require.False(t, factory.fetcher.client.closed)
// Start the service ( dummy )
s.testStart()
@@ -245,12 +250,13 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) {
// Asserts that the last block is the one we expect
require.Equal(t, lastRoundAtStart+basics.Round(numBlocks), local.LastRound())
- require.False(t, factory.fetcher.client.closed)
// Get the same block we wrote
- block, _, client, err := factory.New().FetchBlock(context.Background(), lastRoundAtStart+1)
+ block, _, _, err := makeUniversalBlockFetcher(logging.Base(),
+ net,
+ defaultConfig).fetchBlock(context.Background(), lastRoundAtStart+1, net.peers[0])
+
require.NoError(t, err)
- require.False(t, client.(*MockClient).closed)
//Check we wrote the correct block
localBlock, err := local.Block(lastRoundAtStart + 1)
@@ -258,6 +264,9 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) {
require.Equal(t, *block, localBlock)
}
+// TestAbruptWrites emulates the fact that the agreement can also generate new rounds.
+// When caught up, and the agreement service is taking the lead, sync() stops and
+// yields to the agreement. Agreement is emulated by the go func() loop in the test.
func TestAbruptWrites(t *testing.T) {
numberOfBlocks := 100
@@ -266,18 +275,35 @@ func TestAbruptWrites(t *testing.T) {
}
// Make Ledger
- remote, local := testingenv(t, numberOfBlocks)
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
lastRound := local.LastRound()
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, numberOfBlocks-1)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil)
- factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
- s.fetcherFactory = &factory
+ s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil)
var wg sync.WaitGroup
wg.Add(1)
- defer wg.Wait()
go func() {
defer wg.Done()
for i := basics.Round(lastRound + 1); i <= basics.Round(numberOfBlocks); i++ {
@@ -295,6 +321,7 @@ func TestAbruptWrites(t *testing.T) {
s.testStart()
s.sync()
+ wg.Wait()
require.Equal(t, remote.LastRound(), local.LastRound())
}
@@ -304,12 +331,33 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) {
if testing.Short() {
numberOfBlocks = basics.Round(10)
}
- remote, local := testingenv(t, int(numberOfBlocks))
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
+
lastRoundAtStart := local.LastRound()
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, int(numberOfBlocks)-1)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
// Make Service
- syncer := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil)
- syncer.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
+ syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil)
+ fetcher := makeUniversalBlockFetcher(logging.Base(), net, defaultConfig)
// Start the service ( dummy )
syncer.testStart()
@@ -322,9 +370,8 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) {
for i := basics.Round(1); i <= numberOfBlocks; i++ {
// Get the same block we wrote
- blk, _, client, err2 := syncer.fetcherFactory.New().FetchBlock(context.Background(), i)
+ blk, _, _, err2 := fetcher.fetchBlock(context.Background(), i, net.GetPeers()[0])
require.NoError(t, err2)
- require.False(t, client.(*MockClient).closed)
// Check we wrote the correct block
localBlock, err := local.Block(i)
@@ -336,19 +383,41 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) {
func TestServiceFetchBlocksMalformed(t *testing.T) {
// Make Ledger
- remote, local := testingenv(t, 10)
+ numBlocks := 10
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
lastRoundAtStart := local.LastRound()
+
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, numBlocks-1)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil)
- s.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
+ s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil, nil)
// Start the service ( dummy )
s.testStart()
s.sync()
require.Equal(t, lastRoundAtStart, local.LastRound())
- require.True(t, s.fetcherFactory.(*MockedFetcherFactory).fetcher.client.closed)
+ // maybe check all peers/clients are closed here?
+ //require.True(t, s.fetcherFactory.(*MockedFetcherFactory).fetcher.client.closed)
}
func TestOnSwitchToUnSupportedProtocol(t *testing.T) {
@@ -367,6 +436,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) {
// i.e. rounds 1 and 2 may be simultaneously fetched.
require.Less(t, int(local.LastRound()), 3)
require.Equal(t, lastRoundRemote, int(remote.LastRound()))
+ remote.Ledger.Close()
}
// Test the interruption in "the rest" loop
@@ -382,6 +452,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) {
}
require.Equal(t, lastRoundLocal, int(local.LastRound()))
require.Equal(t, lastRoundRemote, int(remote.LastRound()))
+ remote.Ledger.Close()
}
// Test the interruption with short notice (less than
@@ -405,6 +476,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) {
// fetched.
require.Less(t, int(local.LastRound()), lastRoundLocal+2)
require.Equal(t, lastRoundRemote, int(remote.LastRound()))
+ remote.Ledger.Close()
}
// Test the interruption with short notice (less than
@@ -436,6 +508,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) {
// ledger, round 8 will not be fetched.
require.Equal(t, int(local.LastRound()), lastRoundLocal)
require.Equal(t, lastRoundRemote, int(remote.LastRound()))
+ remote.Ledger.Close()
}
}
@@ -444,7 +517,7 @@ func helperTestOnSwitchToUnSupportedProtocol(
lastRoundRemote,
lastRoundLocal,
roundWithSwitchOn,
- roundsToCopy int) (local, remote Ledger) {
+ roundsToCopy int) (Ledger, *data.Ledger) {
// Make Ledger
mRemote, mLocal := testingenvWithUpgrade(t, lastRoundRemote, roundWithSwitchOn, lastRoundLocal+1)
@@ -454,16 +527,37 @@ func helperTestOnSwitchToUnSupportedProtocol(
mLocal.blocks = append(mLocal.blocks, mRemote.blocks[r])
}
- local = mLocal
- remote = Ledger(mRemote)
+ local := mLocal
+
config := defaultConfig
config.CatchupParallelBlocks = 2
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) //mRemote.blocks[0])
+ if err != nil {
+ t.Fatal(err)
+ return local, remote
+ }
+ for i := 1; i < lastRoundRemote; i++ {
+ blk.NextProtocolSwitchOn = mRemote.blocks[i].NextProtocolSwitchOn
+ blk.NextProtocol = mRemote.blocks[i].NextProtocol
+ addBlocks(t, remote, blk, 1)
+ blk.BlockHeader.Round++
+ }
+
+ // Create a network and block service
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), config, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
// Make Service
- s := MakeService(logging.Base(), config, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil)
+ s := MakeService(logging.Base(), config, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil)
s.deadlineTimeout = 2 * time.Second
-
- s.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
s.Start()
defer s.Stop()
@@ -516,6 +610,14 @@ func (m *mockedLedger) AddBlock(blk bookkeeping.Block, cert agreement.Certificat
return nil
}
+func (m *mockedLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error) {
+ return nil, nil
+}
+
+func (m *mockedLedger) AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error {
+ return nil
+}
+
func (m *mockedLedger) ConsensusParams(r basics.Round) (config.ConsensusParams, error) {
m.mu.Lock()
defer m.mu.Unlock()
@@ -576,23 +678,6 @@ func (m *mockedLedger) IsWritingCatchpointFile() bool {
return false
}
-func testingenv(t testing.TB, numBlocks int) (ledger, emptyLedger Ledger) {
- mLedger := new(mockedLedger)
- mEmptyLedger := new(mockedLedger)
-
- var blk bookkeeping.Block
- blk.CurrentProtocol = protocol.ConsensusCurrentVersion
- mLedger.blocks = append(mLedger.blocks, blk)
- mEmptyLedger.blocks = append(mEmptyLedger.blocks, blk)
-
- for i := 1; i <= numBlocks; i++ {
- blk = bookkeeping.MakeBlock(blk.BlockHeader)
- mLedger.blocks = append(mLedger.blocks, blk)
- }
-
- return mLedger, mEmptyLedger
-}
-
func testingenvWithUpgrade(
t testing.TB,
numBlocks,
@@ -642,13 +727,32 @@ func (s *Service) testStart() {
func TestCatchupUnmatchedCertificate(t *testing.T) {
// Make Ledger
- remote, local := testingenv(t, 10)
-
+ numBlocks := 10
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
lastRoundAtStart := local.LastRound()
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ addBlocks(t, remote, blk, numBlocks-1)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil)
- s.latestRoundFetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
+ s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil, nil)
s.testStart()
for roundNumber := 2; roundNumber < 10; roundNumber += 3 {
pc := &PendingUnmatchedCertificate{
@@ -660,6 +764,5 @@ func TestCatchupUnmatchedCertificate(t *testing.T) {
block, _ := remote.Block(basics.Round(roundNumber))
pc.Cert.Proposal.BlockDigest = block.Digest()
s.syncCert(pc)
- require.True(t, s.latestRoundFetcherFactory.(*MockedFetcherFactory).fetcher.client.closed)
}
}
diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go
new file mode 100644
index 000000000..6ed00790a
--- /dev/null
+++ b/catchup/universalFetcher.go
@@ -0,0 +1,272 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package catchup
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
+)
+
+// universalBlockFetcher fetches blocks either from an http peer or ws peer.
+type universalBlockFetcher struct {
+ config config.Local
+ net network.GossipNode
+ log logging.Logger
+}
+
+// makeUniversalBlockFetcher returns a fetcher for http and ws peers.
+func makeUniversalBlockFetcher(log logging.Logger, net network.GossipNode, config config.Local) *universalBlockFetcher {
+ return &universalBlockFetcher{
+ config: config,
+ net: net,
+ log: log}
+}
+
+// fetchBlock returns a block from the peer. The peer can be either an http or ws peer.
+func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block,
+ cert *agreement.Certificate, downloadDuration time.Duration, err error) {
+
+ var fetchedBuf []byte
+ var address string
+ blockDownloadStartTime := time.Now()
+ if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer {
+ fetcherClient := &wsFetcherClient{
+ target: wsPeer,
+ config: &uf.config,
+ }
+ fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round)
+ address = fetcherClient.address()
+ } else if httpPeer, validHTTPPeer := peer.(network.HTTPPeer); validHTTPPeer {
+ fetcherClient := &HTTPFetcher{
+ peer: httpPeer,
+ rootURL: httpPeer.GetAddress(),
+ net: uf.net,
+ client: httpPeer.GetHTTPClient(),
+ log: uf.log,
+ config: &uf.config}
+ fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round)
+ address = fetcherClient.address()
+ } else {
+ return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer")
+ }
+ downloadDuration = time.Now().Sub(blockDownloadStartTime)
+ if err != nil {
+ return nil, nil, time.Duration(0), err
+ }
+ block, cert, err := processBlockBytes(fetchedBuf, round, address)
+ if err != nil {
+ return nil, nil, time.Duration(0), err
+ }
+ uf.log.Debugf("fetchBlock: downloaded block %d in %d from %s", uint64(round), downloadDuration, address)
+ return block, cert, downloadDuration, err
+}
+
+func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) {
+ var decodedEntry rpcs.EncodedBlockCert
+ err = protocol.Decode(fetchedBuf, &decodedEntry)
+ if err != nil {
+ err = fmt.Errorf("fetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err)
+ return
+ }
+
+ if decodedEntry.Block.Round() != r {
+ err = fmt.Errorf("fetchBlock(%d): got wrong block from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Block.Round())
+ return
+ }
+
+ if decodedEntry.Certificate.Round != r {
+ err = fmt.Errorf("fetchBlock(%d): got wrong cert from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Certificate.Round)
+ return
+ }
+ return &decodedEntry.Block, &decodedEntry.Certificate, nil
+}
+
+// a stub fetcherClient to satisfy the NetworkFetcher interface
+type wsFetcherClient struct {
+ target network.UnicastPeer // the peer where we're going to send the request.
+ config *config.Local
+
+ mu deadlock.Mutex
+}
+
+// getBlockBytes implements FetcherClient
+func (w *wsFetcherClient) getBlockBytes(ctx context.Context, r basics.Round) ([]byte, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ childCtx, cancelFunc := context.WithTimeout(ctx, time.Duration(w.config.CatchupGossipBlockFetchTimeoutSec)*time.Second)
+ w.mu.Unlock()
+
+ defer func() {
+ cancelFunc()
+ // note that we don't need to have additional Unlock here since
+ // we already have a deferred Unlock above ( which executes in reverse order )
+ w.mu.Lock()
+ }()
+
+ blockBytes, err := w.requestBlock(childCtx, r)
+ if err != nil {
+ return nil, err
+ }
+ if len(blockBytes) == 0 {
+ return nil, fmt.Errorf("wsFetcherClient(%d): empty response", r)
+ }
+ return blockBytes, nil
+}
+
+// address implements FetcherClient
+func (w *wsFetcherClient) address() string {
+ return fmt.Sprintf("[ws] (%v)", w.target.GetAddress())
+}
+
+// requestBlock sends a request for block <round> and waits until it receives a response or the context expires.
+func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) {
+ roundBin := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(roundBin, uint64(round))
+ topics := network.Topics{
+ network.MakeTopic(rpcs.RequestDataTypeKey,
+ []byte(rpcs.BlockAndCertValue)),
+ network.MakeTopic(
+ rpcs.RoundKey,
+ roundBin),
+ }
+ resp, err := w.target.Request(ctx, protocol.UniEnsBlockReqTag, topics)
+ if err != nil {
+ return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %v", w.target.GetAddress(), round, err)
+ }
+
+ if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found {
+ return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %s", w.target.GetAddress(), round, string(errMsg))
+ }
+
+ blk, found := resp.Topics.GetValue(rpcs.BlockDataKey)
+ if !found {
+ return nil, fmt.Errorf("wsFetcherClient(%s): request failed: block data not found", w.target.GetAddress())
+ }
+ cert, found := resp.Topics.GetValue(rpcs.CertDataKey)
+ if !found {
+ return nil, fmt.Errorf("wsFetcherClient(%s): request failed: cert data not found", w.target.GetAddress())
+ }
+
+ blockCertBytes := protocol.EncodeReflect(rpcs.PreEncodedBlockCert{
+ Block: blk,
+ Certificate: cert})
+
+ return blockCertBytes, nil
+}
+
+// set max fetcher size to 5MB, this is enough to fit the block and certificate
+const fetcherMaxBlockBytes = 5 << 20
+
+var errNoBlockForRound = errors.New("No block available for given round")
+
+// HTTPFetcher implements FetcherClient doing an HTTP GET of the block
+type HTTPFetcher struct {
+ peer network.HTTPPeer
+ rootURL string
+ net network.GossipNode
+
+ client *http.Client
+
+ log logging.Logger
+ config *config.Local
+}
+
+// getBlockBytes gets a block.
+// Core piece of FetcherClient interface
+func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
+ parsedURL, err := network.ParseHostOrURL(hf.rootURL)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedURL.Path = rpcs.FormatBlockQuery(uint64(r), parsedURL.Path, hf.net)
+ blockURL := parsedURL.String()
+ hf.log.Debugf("block GET %#v peer %#v %T", blockURL, hf.peer, hf.peer)
+ request, err := http.NewRequest("GET", blockURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(hf.config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
+ defer requestCancel()
+ request = request.WithContext(requestCtx)
+ network.SetUserAgentHeader(request.Header)
+ response, err := hf.client.Do(request)
+ if err != nil {
+ hf.log.Debugf("GET %#v : %s", blockURL, err)
+ return nil, err
+ }
+
+ // check to see that we had no errors.
+ switch response.StatusCode {
+ case http.StatusOK:
+ case http.StatusNotFound: // server could not find a block with that round number.
+ response.Body.Close()
+ return nil, errNoBlockForRound
+ default:
+ bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes)
+ hf.log.Warnf("HTTPFetcher.getBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes))
+ if err == nil {
+ err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. Response body '%s'", response.StatusCode, blockURL, string(bodyBytes))
+ } else {
+ err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. %w", response.StatusCode, blockURL, err)
+ }
+ return nil, err
+ }
+
+ // at this point, we've already received the response headers. ensure that the
+ // response content type is what we'd like it to be.
+ contentTypes := response.Header["Content-Type"]
+ if len(contentTypes) != 1 {
+ err = fmt.Errorf("http block fetcher invalid content type count %d", len(contentTypes))
+ hf.log.Warn(err)
+ response.Body.Close()
+ return nil, err
+ }
+
+ // TODO: Temporarily allow old and new content types so we have time for lazy upgrades
+ // Remove this 'old' string after next release.
+ const blockResponseContentTypeOld = "application/algorand-block-v1"
+ if contentTypes[0] != rpcs.BlockResponseContentType && contentTypes[0] != blockResponseContentTypeOld {
+ hf.log.Warnf("http block fetcher response has an invalid content type : %s", contentTypes[0])
+ response.Body.Close()
+ return nil, fmt.Errorf("http block fetcher invalid content type '%s'", contentTypes[0])
+ }
+
+ return rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes)
+}
+
+// address is part of the FetcherClient interface.
+// Returns the root URL of the connected peer.
+func (hf *HTTPFetcher) address() string {
+ return hf.rootURL
+}
diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go
new file mode 100644
index 000000000..14180625d
--- /dev/null
+++ b/catchup/universalFetcher_test.go
@@ -0,0 +1,129 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package catchup
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/rpcs"
+)
+
+// TestUGetBlockWs tests the universal fetcher ws peer case
+func TestUGetBlockWs(t *testing.T) {
+
+ cfg := config.GetDefaultLocal()
+
+ ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+
+ net := &httpTestPeerSource{}
+
+ up := makeTestUnicastPeer(net, t)
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+ ls.Start()
+
+ fetcher := makeUniversalBlockFetcher(logging.TestingLog(t), net, cfg)
+
+ var block *bookkeeping.Block
+ var cert *agreement.Certificate
+ var duration time.Duration
+
+ block, cert, _, err = fetcher.fetchBlock(context.Background(), next, up)
+
+ require.NoError(t, err)
+ require.Equal(t, &b, block)
+ require.GreaterOrEqual(t, int64(duration), int64(0))
+
+ block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, up)
+
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "requested block is not available")
+ require.Nil(t, block)
+ require.Nil(t, cert)
+ require.Equal(t, int64(duration), int64(0))
+}
+
+// TestUGetBlockHttp tests the universal fetcher http peer case
+func TestUGetBlockHttp(t *testing.T) {
+
+ cfg := config.GetDefaultLocal()
+
+ ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+
+ net.addPeer(rootURL)
+ fetcher := makeUniversalBlockFetcher(logging.TestingLog(t), net, cfg)
+
+ var block *bookkeeping.Block
+ var cert *agreement.Certificate
+ var duration time.Duration
+ block, cert, duration, err = fetcher.fetchBlock(context.Background(), next, net.GetPeers()[0])
+
+ require.NoError(t, err)
+ require.Equal(t, &b, block)
+ require.GreaterOrEqual(t, int64(duration), int64(0))
+
+ block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, net.GetPeers()[0])
+
+ require.Error(t, errNoBlockForRound, err)
+ require.Contains(t, err.Error(), "No block available for given round")
+ require.Nil(t, block)
+ require.Nil(t, cert)
+ require.Equal(t, int64(duration), int64(0))
+}
+
+// TestUGetBlockUnsupported tests the handling of an unsupported peer
+func TestUGetBlockUnsupported(t *testing.T) {
+ fetcher := universalBlockFetcher{}
+ peer := ""
+ block, cert, duration, err := fetcher.fetchBlock(context.Background(), 1, peer)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer")
+ require.Nil(t, block)
+ require.Nil(t, cert)
+ require.Equal(t, int64(duration), int64(0))
+}
diff --git a/catchup/wsFetcher.go b/catchup/wsFetcher.go
deleted file mode 100644
index 29443b53c..000000000
--- a/catchup/wsFetcher.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package catchup
-
-import (
- "context"
- "encoding/binary"
- "fmt"
- "time"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
-)
-
-// Buffer messages from the network to have fewer drops.
-const numBufferedInternalMsg = 1
-
-// WsFetcher implements Fetcher, getting the block over
-// a custom websockets interface (bidirectional). Internally it keeps track
-// of multiple peers and handles dropping them appropriately using a NetworkFetcher.
-type WsFetcher struct {
- f *NetworkFetcher
- clients map[network.Peer]*wsFetcherClient
- config *config.Local
-
- // metadata
- log logging.Logger
- mu deadlock.RWMutex
-}
-
-// MakeWsFetcher creates a fetcher that fetches over the gossip network.
-// It instantiates a NetworkFetcher under the hood,
-// and demuxes messages appropriately to the corresponding fetcher clients.
-func MakeWsFetcher(log logging.Logger, peers []network.Peer, cfg *config.Local) Fetcher {
- f := &WsFetcher{
- log: log,
- config: cfg,
- }
- f.clients = make(map[network.Peer]*wsFetcherClient)
- p := make([]FetcherClient, len(peers))
- for i, peer := range peers {
- fc := &wsFetcherClient{
- target: peer.(network.UnicastPeer),
- pendingCtxs: make(map[context.Context]context.CancelFunc),
- config: cfg,
- }
- p[i] = fc
- f.clients[peer] = fc
- }
- f.f = &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: p,
- log: f.log,
- }
- return f
-}
-
-// FetchBlock implements Fetcher interface
-func (wsf *WsFetcher) FetchBlock(ctx context.Context, r basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error) {
- return wsf.f.FetchBlock(ctx, r)
-}
-
-// OutOfPeers implements Fetcher interface
-func (wsf *WsFetcher) OutOfPeers(round basics.Round) bool {
- return wsf.f.OutOfPeers(round)
-}
-
-// NumPeers implements Fetcher interface
-func (wsf *WsFetcher) NumPeers() int {
- return wsf.f.NumPeers()
-}
-
-// Close calls a delegate close fn passed in by the parent of this fetcher
-func (wsf *WsFetcher) Close() {
- wsf.f.Close()
-}
-
-// a stub fetcherClient to satisfy the NetworkFetcher interface
-type wsFetcherClient struct {
- target network.UnicastPeer // the peer where we're going to send the request.
- pendingCtxs map[context.Context]context.CancelFunc // a map of all the current pending contexts.
- config *config.Local
-
- closed bool // a flag indicating that the fetcher will not perform additional block retrivals.
-
- mu deadlock.Mutex
-}
-
-// GetBlockBytes implements FetcherClient
-func (w *wsFetcherClient) GetBlockBytes(ctx context.Context, r basics.Round) ([]byte, error) {
- w.mu.Lock()
- defer w.mu.Unlock()
- if w.closed {
- return nil, fmt.Errorf("wsFetcherClient(%d): shutdown", r)
- }
-
- childCtx, cancelFunc := context.WithTimeout(ctx, time.Duration(w.config.CatchupGossipBlockFetchTimeoutSec)*time.Second)
- w.pendingCtxs[childCtx] = cancelFunc
- w.mu.Unlock()
-
- defer func() {
- cancelFunc()
- // note that we don't need to have additional Unlock here since
- // we already have a defered Unlock above ( which executes in reversed order )
- w.mu.Lock()
- delete(w.pendingCtxs, childCtx)
- }()
-
- blockBytes, err := w.requestBlock(childCtx, r)
- if err != nil {
- return nil, err
- }
- if len(blockBytes) == 0 {
- return nil, fmt.Errorf("wsFetcherClient(%d): empty response", r)
- }
- return blockBytes, nil
-}
-
-// Address implements FetcherClient
-func (w *wsFetcherClient) Address() string {
- return fmt.Sprintf("[ws] (%v)", w.target.GetAddress())
-}
-
-// Close is part of FetcherClient interface
-func (w *wsFetcherClient) Close() error {
- w.mu.Lock()
- defer w.mu.Unlock()
- w.closed = true
- for _, cancelFunc := range w.pendingCtxs {
- cancelFunc()
- }
- w.pendingCtxs = make(map[context.Context]context.CancelFunc)
- return nil
-}
-
-// requestBlock send a request for block <round> and wait until it receives a response or a context expires.
-func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) {
- roundBin := make([]byte, binary.MaxVarintLen64)
- binary.PutUvarint(roundBin, uint64(round))
- topics := network.Topics{
- network.MakeTopic(rpcs.RequestDataTypeKey,
- []byte(rpcs.BlockAndCertValue)),
- network.MakeTopic(
- rpcs.RoundKey,
- roundBin),
- }
- resp, err := w.target.Request(ctx, protocol.UniEnsBlockReqTag, topics)
- if err != nil {
- return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %v", w.target.GetAddress(), round, err)
- }
-
- if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found {
- return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %s", w.target.GetAddress(), round, string(errMsg))
- }
-
- blk, found := resp.Topics.GetValue(rpcs.BlockDataKey)
- if !found {
- return nil, fmt.Errorf("wsFetcherClient(%s): request failed: block data not found", w.target.GetAddress())
- }
- cert, found := resp.Topics.GetValue(rpcs.CertDataKey)
- if !found {
- return nil, fmt.Errorf("wsFetcherClient(%s): request failed: cert data not found", w.target.GetAddress())
- }
-
- blockCertBytes := protocol.EncodeReflect(rpcs.PreEncodedBlockCert{
- Block: blk,
- Certificate: cert})
-
- return blockCertBytes, nil
-}
diff --git a/cmd/algofix/deadlock.go b/cmd/algofix/deadlock.go
index aae724def..4df1bdc30 100644
--- a/cmd/algofix/deadlock.go
+++ b/cmd/algofix/deadlock.go
@@ -18,6 +18,7 @@ package main
import (
"go/ast"
+ "strings"
)
func init() {
@@ -37,7 +38,29 @@ func deadlock(f *ast.File) bool {
}
fixed := false
+
+ var provisionalRewrites []*ast.SelectorExpr
+
walk(f, func(n interface{}) {
+ if f, ok := n.(*ast.Field); ok {
+ if f.Tag != nil {
+ if strings.Contains(f.Tag.Value, `algofix:"allow sync.Mutex"`) {
+ exceptPos := f.Pos()
+ exceptEnd := f.End()
+ // cancel a provisional rewrite if it winds up being contained in a struct field decl with a tag to allow sync.Mutex
+ for i, e := range provisionalRewrites {
+ if e == nil {
+ continue
+ }
+ if exceptPos <= e.Pos() && e.End() <= exceptEnd {
+ provisionalRewrites[i] = nil
+ }
+ }
+ }
+ }
+ return
+ }
+
e, ok := n.(*ast.SelectorExpr)
if !ok {
return
@@ -50,11 +73,19 @@ func deadlock(f *ast.File) bool {
estr := pkg.Name + "." + e.Sel.Name
if estr == "sync.Mutex" || estr == "sync.RWMutex" {
- e.X = &ast.Ident{Name: "deadlock"}
- fixed = true
+ provisionalRewrites = append(provisionalRewrites, e)
}
})
+ // actually apply any provisional rewrites that weren't cancelled
+ for _, e := range provisionalRewrites {
+ if e == nil {
+ continue
+ }
+ e.X = &ast.Ident{Name: "deadlock"}
+ fixed = true
+ }
+
if fixed {
addImport(f, "github.com/algorand/go-deadlock")
}
diff --git a/cmd/algofix/deadlock_test.go b/cmd/algofix/deadlock_test.go
new file mode 100644
index 000000000..f1f3aa1b0
--- /dev/null
+++ b/cmd/algofix/deadlock_test.go
@@ -0,0 +1,176 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+const deadlockSimpleSrc = `package main
+
+import (
+ "sync"
+)
+
+func main() {
+ // lol wut?
+ var l sync.Mutex
+ var r sync.Mutex
+ var x sync.Mutex
+
+ l.Lock()
+ defer l.Unlock()
+ r.Lock()
+ defer r.Unlock()
+ x.Lock()
+ defer x.Unlock()
+}
+`
+const deadlockSimpleDest = `package main
+
+import (
+ "github.com/algorand/go-deadlock"
+ "sync"
+)
+
+func main() {
+ // lol wut?
+ var l deadlock.Mutex
+ var r deadlock.Mutex
+ var x deadlock.Mutex
+
+ l.Lock()
+ defer l.Unlock()
+ r.Lock()
+ defer r.Unlock()
+ x.Lock()
+ defer x.Unlock()
+}
+`
+
+func tripleTickToBacktick(x string) string {
+ return strings.ReplaceAll(x, "'''", "`")
+}
+
+const deadlockTestSrc = `package main
+
+import (
+ "sync"
+)
+
+type thing struct {
+ l sync.Mutex
+ r sync.Mutex '''algofix:"allow sync.Mutex"'''
+ x sync.Mutex
+}
+
+func (t *thing) foo() {
+ t.l.Lock()
+ defer t.l.Unlock()
+ t.r.Lock()
+ defer t.r.Unlock()
+ t.x.Lock()
+ defer t.x.Unlock()
+}
+
+func main() {
+ var t thing
+ t.foo()
+}
+`
+
+const deadlockTestFin = `package main
+
+import (
+ "github.com/algorand/go-deadlock"
+ "sync"
+)
+
+type thing struct {
+ l deadlock.Mutex
+ r sync.Mutex '''algofix:"allow sync.Mutex"'''
+ x deadlock.Mutex
+}
+
+func (t *thing) foo() {
+ t.l.Lock()
+ defer t.l.Unlock()
+ t.r.Lock()
+ defer t.r.Unlock()
+ t.x.Lock()
+ defer t.x.Unlock()
+}
+
+func main() {
+ var t thing
+ t.foo()
+}
+`
+
+func TestDeadlockRewrite(t *testing.T) {
+ t.Run("simple", func(t *testing.T) { testDeadlock(t, deadlockSimpleSrc, deadlockSimpleDest) })
+ t.Run("onoff", func(t *testing.T) { testDeadlock(t, deadlockTestSrc, deadlockTestFin) })
+}
+
+func testGoFmt(fset *token.FileSet, node interface{}) (out string, err error) {
+ var buf bytes.Buffer
+ err = format.Node(&buf, fset, node)
+ if err == nil {
+ out = string(buf.Bytes())
+ }
+ return
+}
+
+func testDeadlock(t *testing.T, src, dest string) {
+ src = tripleTickToBacktick(src)
+ dest = tripleTickToBacktick(dest)
+ fset := token.NewFileSet()
+ filename := "testmain.go"
+ file, err := parser.ParseFile(fset, filename, src, parserMode)
+ require.NoError(t, err)
+ fixed := deadlock(file)
+ require.True(t, fixed)
+ src2, err := testGoFmt(fset, file)
+ require.NoError(t, err)
+
+ // rinse, repeat?
+ newFile, err := parser.ParseFile(fset, filename, src2, parserMode)
+ require.NoError(t, err)
+ src3, err := testGoFmt(fset, newFile)
+ require.NoError(t, err)
+
+ if string(src3) != dest {
+ fmt.Printf("===== %s orig =====\n", t.Name())
+ fmt.Println(string(src))
+ fmt.Printf("===== %s orig =====\n", t.Name())
+ fmt.Printf("===== %s src2 =====\n", t.Name())
+ fmt.Println(string(src2))
+ fmt.Printf("===== %s src2 =====\n", t.Name())
+ fmt.Printf("===== %s actual =====\n", t.Name())
+ fmt.Println(string(src3))
+ fmt.Printf("===== %s actual =====\n", t.Name())
+ }
+ require.Equal(t, dest, string(src3))
+}
diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go
index 0902496a4..ef4afbb13 100644
--- a/cmd/algoh/main.go
+++ b/cmd/algoh/main.go
@@ -362,7 +362,7 @@ func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, ou
func reportErrorf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format, args...)
- logging.Base().Fatalf(format, args...)
+ logging.Base().Warnf(format, args...)
}
func sendLogs() {
diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go
index 49cc9c0f5..48ba060a3 100644
--- a/cmd/algokey/part.go
+++ b/cmd/algokey/part.go
@@ -81,7 +81,7 @@ var partGenerateCmd = &cobra.Command{
os.Exit(1)
}
- printPartkey(partkey)
+ printPartkey(partkey.Participation)
},
}
@@ -101,8 +101,9 @@ var partInfoCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err)
os.Exit(1)
}
+ partkey.Close()
- printPartkey(partkey)
+ printPartkey(partkey.Participation)
},
}
@@ -128,6 +129,7 @@ var partReparentCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err)
os.Exit(1)
}
+ defer partkey.Close()
partkey.Parent = parent
err = partkey.PersistNewParent()
@@ -136,7 +138,7 @@ var partReparentCmd = &cobra.Command{
os.Exit(1)
}
- printPartkey(partkey)
+ printPartkey(partkey.Participation)
},
}
diff --git a/cmd/catchpointdump/commands.go b/cmd/catchpointdump/commands.go
index 96cb34a39..0ee9b433d 100644
--- a/cmd/catchpointdump/commands.go
+++ b/cmd/catchpointdump/commands.go
@@ -41,11 +41,9 @@ var kmdDataDirFlag string
var versionCheck bool
func init() {
- // file.go
rootCmd.AddCommand(fileCmd)
rootCmd.AddCommand(netCmd)
rootCmd.AddCommand(databaseCmd)
-
}
var rootCmd = &cobra.Command{
diff --git a/cmd/catchpointdump/database.go b/cmd/catchpointdump/database.go
index 95beb16ee..334063fe2 100644
--- a/cmd/catchpointdump/database.go
+++ b/cmd/catchpointdump/database.go
@@ -17,11 +17,16 @@
package main
import (
+ "context"
+ "database/sql"
+ "fmt"
"os"
"github.com/spf13/cobra"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/util/db"
)
var ledgerTrackerFilename string
@@ -29,6 +34,9 @@ var ledgerTrackerFilename string
func init() {
databaseCmd.Flags().StringVarP(&ledgerTrackerFilename, "tracker", "t", "", "Specify the ledger tracker file name ( i.e. ./ledger.tracker.sqlite )")
databaseCmd.Flags().StringVarP(&outFileName, "output", "o", "", "Specify an outfile for the dump ( i.e. ledger.dump.txt )")
+ databaseCmd.AddCommand(checkCmd)
+
+ checkCmd.Flags().StringVarP(&ledgerTrackerFilename, "tracker", "t", "", "Specify the ledger tracker file name ( i.e. ./ledger.tracker.sqlite )")
}
var databaseCmd = &cobra.Command{
@@ -44,7 +52,7 @@ var databaseCmd = &cobra.Command{
outFile := os.Stdout
var err error
if outFileName != "" {
- outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_CREATE, 0755)
+ outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755)
if err != nil {
reportErrorf("Unable to create file '%s' : %v", outFileName, err)
}
@@ -56,3 +64,63 @@ var databaseCmd = &cobra.Command{
}
},
}
+
+var checkCmd = &cobra.Command{
+ Use: "check",
+ Short: "Performs a consistency check on the accounts merkle trie",
+ Long: "Performs a consistency check on the accounts merkle trie",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ if ledgerTrackerFilename == "" {
+ cmd.HelpFunc()(cmd, args)
+ return
+ }
+
+ outFile := os.Stdout
+ fmt.Fprintf(outFile, "Checking tracker database at %s.\n", ledgerTrackerFilename)
+ err := checkDatabase(ledgerTrackerFilename, outFile)
+ if err != nil {
+ reportErrorf("Error checking database : %v", err)
+ }
+ },
+}
+
+func checkDatabase(databaseName string, outFile *os.File) error {
+ dbAccessor, err := db.MakeAccessor(databaseName, true, false)
+ if err != nil || dbAccessor.Handle == nil {
+ return err
+ }
+ if dbAccessor.Handle == nil {
+ return fmt.Errorf("database handle is nil when opening database %s", databaseName)
+ }
+ defer func() {
+ dbAccessor.Close()
+ }()
+
+ var stats merkletrie.Stats
+ err = dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ committer, err := ledger.MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return err
+ }
+ trie, err := merkletrie.MakeTrie(committer, ledger.TrieMemoryConfig)
+ if err != nil {
+ return err
+ }
+ stats, err = trie.GetStats()
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(outFile, "Merkle trie statistics:\n")
+ fmt.Fprintf(outFile, " Nodes count: %d\n", stats.NodesCount)
+ fmt.Fprintf(outFile, " Leaf count: %d\n", stats.LeafCount)
+ fmt.Fprintf(outFile, " Depth: %d\n", stats.Depth)
+ fmt.Fprintf(outFile, " Size: %d\n", stats.Size)
+ return nil
+}
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index 9201d504c..15eb857ce 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -91,7 +91,7 @@ var fileCmd = &cobra.Command{
outFile := os.Stdout
if outFileName != "" {
- outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_CREATE, 0755)
+ outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755)
if err != nil {
reportErrorf("Unable to create file '%s' : %v", outFileName, err)
}
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 66fad3cfc..354d2c360 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -300,7 +300,7 @@ func makeFileDump(addr string, catchpointFileBytes []byte) error {
}
dirName := "./" + strings.Split(networkName, ".")[0] + "/" + strings.Split(addr, ".")[0]
- outFile, err := os.OpenFile(dirName+"/"+strconv.FormatUint(uint64(round), 10)+".dump", os.O_RDWR|os.O_CREATE, 0755)
+ outFile, err := os.OpenFile(dirName+"/"+strconv.FormatUint(uint64(round), 10)+".dump", os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755)
if err != nil {
return err
}
diff --git a/cmd/genesis/newgenesis.go b/cmd/genesis/newgenesis.go
index 27a659211..5a7ad8326 100644
--- a/cmd/genesis/newgenesis.go
+++ b/cmd/genesis/newgenesis.go
@@ -19,7 +19,9 @@ package main
import (
"flag"
"fmt"
+ "io"
"log"
+ "os"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/gen"
@@ -54,7 +56,11 @@ func main() {
genesisData.NetworkName = *netName
}
- err = gen.GenerateGenesisFiles(genesisData, config.Consensus, *outDir, !*quiet)
+ var verboseOut io.Writer = nil
+ if !*quiet {
+ verboseOut = os.Stdout
+ }
+ err = gen.GenerateGenesisFiles(genesisData, config.Consensus, *outDir, verboseOut)
if err != nil {
reportErrorf("Cannot write genesis files: %s", err)
}
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index c5505ec80..e508a71ce 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -760,7 +760,7 @@ var changeOnlineCmd = &cobra.Command{
os.Exit(1)
}
- part = &partkey
+ part = &partkey.Participation
if accountAddress == "" {
accountAddress = part.Parent.String()
}
@@ -810,7 +810,8 @@ func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnl
return nil
}
- return waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastTxRound)
+ return err
}
var addParticipationKeyCmd = &cobra.Command{
@@ -925,7 +926,6 @@ func generateAndRegisterPartKey(address string, currentRound, lastValidRound uin
txFile := ""
err = changeAccountOnlineStatus(address, &part, goOnline, txFile, wallet, currentRound, lastValidRound, fee, leaseBytes, dataDir, client)
if err != nil {
- part.Close()
os.Remove(keyPath)
fmt.Fprintf(os.Stderr, " Error registering keys - deleting newly-generated key file: %s\n", keyPath)
}
@@ -1324,7 +1324,7 @@ var markNonparticipatingCmd = &cobra.Command{
return
}
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastTxRound)
if err != nil {
reportErrorf("error waiting for transaction to be committed: %v", err)
}
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 43e13fa72..fd467a6e0 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -416,15 +416,10 @@ var createAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ txn, err := waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- txn, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
if txn.TransactionResults != nil && txn.TransactionResults.CreatedAppIndex != 0 {
reportInfof("Created app with app index %d", txn.TransactionResults.CreatedAppIndex)
}
@@ -499,15 +494,10 @@ var updateAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- _, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
}
} else {
if dumpForDryrun {
@@ -577,15 +567,10 @@ var optInAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- _, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
}
} else {
if dumpForDryrun {
@@ -655,15 +640,10 @@ var closeOutAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- _, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
}
} else {
if dumpForDryrun {
@@ -733,15 +713,10 @@ var clearAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- _, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
}
} else {
if dumpForDryrun {
@@ -811,15 +786,10 @@ var callAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- _, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
}
} else {
if dumpForDryrun {
@@ -889,15 +859,10 @@ var deleteAppCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- _, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
}
} else {
if dumpForDryrun {
diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go
index b0c2cd234..047949c0b 100644
--- a/cmd/goal/asset.go
+++ b/cmd/goal/asset.go
@@ -231,12 +231,7 @@ var createAssetCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
- if err != nil {
- reportErrorf(err.Error())
- }
- // Check if we know about the transaction yet
- txn, err := client.PendingTransactionInformation(txid)
+ txn, err := waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
@@ -311,7 +306,7 @@ var destroyAssetCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastValid)
if err != nil {
reportErrorf(err.Error())
}
@@ -400,7 +395,7 @@ var configAssetCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastValid)
if err != nil {
reportErrorf(err.Error())
}
@@ -481,7 +476,7 @@ var sendAssetCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastValid)
if err != nil {
reportErrorf(err.Error())
}
@@ -546,7 +541,7 @@ var freezeAssetCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastValid)
if err != nil {
reportErrorf(err.Error())
}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 31a0ce8e8..332d245b1 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -151,18 +152,18 @@ var clerkCmd = &cobra.Command{
},
}
-func waitForCommit(client libgoal.Client, txid string) error {
+func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) (txn v1.Transaction, err error) {
// Get current round information
stat, err := client.Status()
if err != nil {
- return fmt.Errorf(errorRequestFail, err)
+ return v1.Transaction{}, fmt.Errorf(errorRequestFail, err)
}
for {
// Check if we know about the transaction yet
- txn, err := client.PendingTransactionInformation(txid)
+ txn, err = client.PendingTransactionInformation(txid)
if err != nil {
- return fmt.Errorf(errorRequestFail, err)
+ return v1.Transaction{}, fmt.Errorf(errorRequestFail, err)
}
if txn.ConfirmedRound > 0 {
@@ -171,17 +172,25 @@ func waitForCommit(client libgoal.Client, txid string) error {
}
if txn.PoolError != "" {
- return fmt.Errorf(txPoolError, txid, txn.PoolError)
+ return v1.Transaction{}, fmt.Errorf(txPoolError, txid, txn.PoolError)
+ }
+
+ // check if a block whose round number equals the transaction's last valid round has already been
+ // committed. if that is the case, the transaction can no longer be included in the blockchain, and
+ // we can exit right here.
+ if transactionLastValidRound > 0 && stat.LastRound >= transactionLastValidRound {
+ return v1.Transaction{}, fmt.Errorf(errorTransactionExpired, txid)
}
reportInfof(infoTxPending, txid, stat.LastRound)
- stat, err = client.WaitForRound(stat.LastRound + 1)
+ // WaitForRound waits until round "stat.LastRound+1" is committed
+ stat, err = client.WaitForRound(stat.LastRound)
if err != nil {
- return fmt.Errorf(errorRequestFail, err)
+ return v1.Transaction{}, fmt.Errorf(errorRequestFail, err)
}
}
- return nil
+ return
}
func createSignedTransaction(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction) (stxn transactions.SignedTxn, err error) {
@@ -460,7 +469,7 @@ var sendCmd = &cobra.Command{
reportInfof(infoTxIssued, amount, fromAddressResolved, toAddressResolved, txid, fee)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ _, err = waitForCommit(client, txid, lastValid)
if err != nil {
reportErrorf(err.Error())
}
@@ -913,6 +922,17 @@ func assembleFile(fname string) (program []byte) {
ops.ReportProblems(fname)
reportErrorf("%s: %s", fname, err)
}
+ _, params := getProto(protoVersion)
+ if ops.HasStatefulOps {
+ if len(ops.Program) > params.MaxAppProgramLen {
+ reportErrorf(tealAppSize, fname, len(ops.Program), params.MaxAppProgramLen)
+ }
+ } else {
+ if uint64(len(ops.Program)) > params.LogicSigMaxSize {
+ reportErrorf(tealLogicSigSize, fname, len(ops.Program), params.LogicSigMaxSize)
+ }
+ }
+
return ops.Program
}
@@ -1056,8 +1076,11 @@ var dryrunCmd = &cobra.Command{
if txn.Lsig.Blank() {
continue
}
+ if uint64(txn.Lsig.Len()) > params.LogicSigMaxSize {
+ reportErrorf("program size too large: %d > %d", len(txn.Lsig.Logic), params.LogicSigMaxSize)
+ }
ep := logic.EvalParams{Txn: &txn, Proto: &params, GroupIndex: i, TxnGroup: txgroup}
- cost, err := logic.Check(txn.Lsig.Logic, ep)
+ err := logic.Check(txn.Lsig.Logic, ep)
if err != nil {
reportErrorf("program failed Check: %s", err)
}
@@ -1071,7 +1094,7 @@ var dryrunCmd = &cobra.Command{
}
pass, err := logic.Eval(txn.Lsig.Logic, ep)
// TODO: optionally include `inspect` output here?
- fmt.Fprintf(os.Stdout, "tx[%d] cost=%d trace:\n%s\n", i, cost, sb.String())
+ fmt.Fprintf(os.Stdout, "tx[%d] trace:\n%s\n", i, sb.String())
if pass {
fmt.Fprintf(os.Stdout, " - pass -\n")
} else {
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index 4b6eba200..7ed9be407 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -622,15 +622,10 @@ var appExecuteCmd = &cobra.Command{
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- err = waitForCommit(client, txid)
+ txn, err := waitForCommit(client, txid, lv)
if err != nil {
reportErrorf(err.Error())
}
- // Check if we know about the transaction yet
- txn, err := client.PendingTransactionInformation(txid)
- if err != nil {
- reportErrorf("%v", err)
- }
if txn.TransactionResults != nil && txn.TransactionResults.CreatedAppIndex != 0 {
reportInfof("Created app with app index %d", txn.TransactionResults.CreatedAppIndex)
}
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index a6796c811..edd6ea74c 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -58,6 +58,7 @@ const (
// Node
infoNodeStart = "Algorand node successfully started!"
infoNodeAlreadyStarted = "Algorand node was already started!"
+ infoNodeDidNotRestart = "Algorand node did not restart. The node is still running!"
infoTryingToStopNode = "Trying to stop the node..."
infoNodeShuttingDown = "Algorand node is shutting down..."
infoNodeSuccessfullyStopped = "The node was successfully stopped."
@@ -124,6 +125,7 @@ const (
rekeySenderTargetSameError = "The sender and the resulted multisig address are the same"
noOutputFileError = "--msig-params must be specified with an output file name (-o)"
infoAutoFeeSet = "Automatically set fee to %d MicroAlgos"
+ errorTransactionExpired = "Transaction %s expired before it could be included in a block"
loggingNotConfigured = "Remote logging is not currently configured and won't be enabled"
loggingNotEnabled = "Remote logging is current disabled"
@@ -156,6 +158,9 @@ const (
tealsignTooManyArg = "--set-lsig-arg-idx too large, maximum of %d arguments"
tealsignInfoWroteSig = "Wrote signature for %s to LSig.Args[%d]"
+ tealLogicSigSize = "%s: logicsig program size too large: %d > %d"
+ tealAppSize = "%s: app program size too large: %d > %d"
+
// Wallet
infoRecoveryPrompt = "Please type your recovery mnemonic below, and hit return when you are done: "
infoChoosePasswordPrompt = "Please choose a password for wallet '%s': "
@@ -164,7 +169,6 @@ const (
infoCreatedWallet = "Created wallet '%s'"
infoBackupExplanation = "Your new wallet has a backup phrase that can be used for recovery.\nKeeping this backup phrase safe is extremely important.\nWould you like to see it now? (Y/n): "
infoPrintedBackupPhrase = "Your backup phrase is printed below.\nKeep this information safe -- never share it with anyone!"
- infoBackupPhrase = "\n\x1B[32m%s\033[0m"
infoNoWallets = "No wallets found. You can create a wallet with `goal wallet new`"
errorCouldntCreateWallet = "Couldn't create wallet: %s"
errorCouldntInitializeWallet = "Couldn't initialize wallet: %s"
diff --git a/cmd/goal/messages_common.go b/cmd/goal/messages_common.go
new file mode 100644
index 000000000..39b2ae636
--- /dev/null
+++ b/cmd/goal/messages_common.go
@@ -0,0 +1,24 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// +build !windows
+
+package main
+
+const (
+ // Wallet
+ infoBackupPhrase = "\n\x1B[32m%s\033[0m"
+)
diff --git a/cmd/goal/messages_windows.go b/cmd/goal/messages_windows.go
new file mode 100644
index 000000000..3d06815cd
--- /dev/null
+++ b/cmd/goal/messages_windows.go
@@ -0,0 +1,22 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+const (
+ // Wallet
+ infoBackupPhrase = "\n%s"
+)
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index 322d4824d..dd42e2381 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -182,14 +182,14 @@ var startCmd = &cobra.Command{
}
algodAlreadyRunning, err := nc.StartAlgod(nodeArgs)
- if algodAlreadyRunning {
- reportInfoln(infoNodeAlreadyStarted)
- }
-
if err != nil {
reportErrorf(errorNodeFailedToStart, err)
} else {
- reportInfoln(infoNodeStart)
+ if algodAlreadyRunning {
+ reportInfoln(infoNodeAlreadyStarted)
+ } else {
+ reportInfoln(infoNodeStart)
+ }
}
})
},
@@ -304,14 +304,15 @@ var restartCmd = &cobra.Command{
}
algodAlreadyRunning, err := nc.StartAlgod(nodeArgs)
- if algodAlreadyRunning {
- reportInfoln(infoNodeAlreadyStarted)
- }
-
if err != nil {
reportErrorf(errorNodeFailedToStart, err)
} else {
- reportInfoln(infoNodeStart)
+ if algodAlreadyRunning {
+ // This can never happen. In case it does, report about it.
+ reportInfoln(infoNodeDidNotRestart)
+ } else {
+ reportInfoln(infoNodeStart)
+ }
}
})
},
diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go
index 349fbfdf0..25379778c 100644
--- a/cmd/netgoal/generate.go
+++ b/cmd/netgoal/generate.go
@@ -44,6 +44,12 @@ var walletsToGenerate int
var nodeTemplatePath string
var nonParticipatingNodeTemplatePath string
var relayTemplatePath string
+var sourceWallet string
+var rounds uint64
+var roundTxnCount uint64
+var accountsCount uint64
+var assetsCount uint64
+var applicationCount uint64
func init() {
rootCmd.AddCommand(generateCmd)
@@ -61,6 +67,12 @@ func init() {
generateCmd.Flags().StringVarP(&nodeTemplatePath, "node-template", "", "", "json for one node")
generateCmd.Flags().StringVarP(&nonParticipatingNodeTemplatePath, "non-participating-node-template", "", "", "json for non participating node")
generateCmd.Flags().StringVarP(&relayTemplatePath, "relay-template", "", "", "json for a relay node")
+ generateCmd.Flags().StringVarP(&sourceWallet, "wallet-name", "", "", "Source wallet name")
+ generateCmd.Flags().Uint64VarP(&rounds, "rounds", "", 13, "Number of rounds")
+ generateCmd.Flags().Uint64VarP(&roundTxnCount, "ntxns", "", 17, "Transaction count")
+ generateCmd.Flags().Uint64VarP(&accountsCount, "naccounts", "", 31, "Account count")
+ generateCmd.Flags().Uint64VarP(&assetsCount, "nassets", "", 5, "Asset count")
+ generateCmd.Flags().Uint64VarP(&applicationCount, "napps", "", 7, "Application Count")
longParts := make([]string, len(generateTemplateLines)+1)
longParts[0] = generateCmd.Long
@@ -75,6 +87,7 @@ var generateTemplateLines = []string{
"otwt => OneThousandWallets network template",
"otwg => OneThousandWallets genesis data",
"ohwg => OneHundredWallets genesis data",
+ "loadingFile => create accounts database file according to -wallet-name -rounds -ntxns -naccts -nassets -napps options",
}
var generateCmd = &cobra.Command{
@@ -157,6 +170,12 @@ template modes for -t:`,
err = generateWalletGenesis(outputFilename, 1000, 0)
case "ohwg":
err = generateWalletGenesis(outputFilename, 100, 0)
+ case "loadingfile":
+ if sourceWallet == "" {
+ reportErrorf("must specify source wallet name with -wname.")
+ }
+
+ err = generateAccountsLoadingFileTemplate(outputFilename, sourceWallet, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount)
default:
reportInfoln("Please specify a valid template name.\nSupported templates are:")
for _, line := range generateTemplateLines {
@@ -472,3 +491,27 @@ func saveGenesisDataToDisk(genesisData gen.GenesisData, filename string) error {
}
return err
}
+
+func generateAccountsLoadingFileTemplate(templateFilename, sourceWallet string, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount uint64) error {
+
+ var data = remote.BootstrappedNetwork{
+ NumRounds: rounds,
+ RoundTransactionsCount: roundTxnCount,
+ GeneratedAccountsCount: accountsCount,
+ GeneratedAssetsCount: assetsCount,
+ GeneratedApplicationCount: applicationCount,
+ SourceWalletName: sourceWallet,
+ }
+ return saveLoadingFileDataToDisk(data, templateFilename)
+}
+
+func saveLoadingFileDataToDisk(data remote.BootstrappedNetwork, filename string) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ enc := codecs.NewFormattedJSONEncoder(f)
+ return enc.Encode(data)
+}
diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go
index 5529e75ec..9e2f12a0f 100644
--- a/cmd/netgoal/network.go
+++ b/cmd/netgoal/network.go
@@ -37,6 +37,7 @@ var miscStringStringTokens []string
var networkUseGenesisFiles bool
var networkIgnoreExistingDir bool
+var bootstrapLoadingFile bool
func init() {
rootCmd.AddCommand(networkBuildCmd)
@@ -50,6 +51,7 @@ func init() {
networkBuildCmd.MarkFlagRequired("recipe")
networkBuildCmd.Flags().BoolVarP(&networkUseGenesisFiles, "use-existing-files", "e", false, "Use existing genesis files.")
+ networkBuildCmd.Flags().BoolVarP(&bootstrapLoadingFile, "gen-db-files", "b", false, "Generate database files.")
networkBuildCmd.Flags().BoolVarP(&networkIgnoreExistingDir, "force", "f", false, "Force generation into existing directory.")
networkBuildCmd.Flags().StringSliceVarP(&miscStringStringTokens, "val", "v", nil, "name=value, may be reapeated")
@@ -140,6 +142,21 @@ func runBuildNetwork() (err error) {
net.GenesisData.VersionModifier = networkGenesisVersionModifier
}
+ var bootstrappedFile string
+ if r.BootstrappedFile != "" {
+ bootstrappedFile = resolveFile(r.BootstrappedFile, templateBaseDir)
+ }
+ if util.FileExists(bootstrappedFile) && bootstrapLoadingFile {
+ fileTemplate, err := remote.LoadBootstrappedData(bootstrappedFile)
+ if err != nil {
+ return fmt.Errorf("error resolving bootstrap file: %v", err)
+ }
+ net.BootstrappedNet = fileTemplate
+ net.SetUseBoostrappedFiles(bootstrapLoadingFile)
+ } else {
+ net.SetUseBoostrappedFiles(false)
+ }
+
net.SetUseExistingGenesisFiles(networkUseGenesisFiles)
err = net.Validate(buildConfig, networkRootDir)
if err != nil {
@@ -187,7 +204,6 @@ func resolveFile(filename string, baseDir string) string {
if filepath.IsAbs(filename) {
return filename
}
-
// Assume path is relative to the directory of the template file
return filepath.Join(baseDir, filename)
}
diff --git a/cmd/netgoal/recipe.go b/cmd/netgoal/recipe.go
index b2210d352..ffd31254f 100644
--- a/cmd/netgoal/recipe.go
+++ b/cmd/netgoal/recipe.go
@@ -22,4 +22,5 @@ type recipe struct {
HostTemplatesFile string
NetworkFile string
TopologyFile string
+ BootstrappedFile string
}
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 3c1be04ce..ef624a095 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -50,7 +50,7 @@ func typeEnumTableMarkdown(out io.Writer) {
fmt.Fprintf(out, "| Index | \"Type\" string | Description |\n")
fmt.Fprintf(out, "| --- | --- | --- |\n")
for i, name := range logic.TxnTypeNames {
- fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescription(name))
+ fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescriptions[name])
}
out.Write([]byte("\n"))
}
@@ -69,7 +69,7 @@ func integerConstantsTableMarkdown(out io.Writer) {
fmt.Fprintf(out, "| Value | Constant name | Description |\n")
fmt.Fprintf(out, "| --- | --- | --- |\n")
for i, name := range logic.TxnTypeNames {
- fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescription(name))
+ fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescriptions[name])
}
out.Write([]byte("\n"))
}
@@ -119,7 +119,6 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
if opextra != "" {
ws = " "
}
- costs := logic.OpAllCosts(op.Name)
fmt.Fprintf(out, "\n## %s%s\n\n- Opcode: 0x%02x%s%s\n", op.Name, immediateMarkdown(op), op.Opcode, ws, opextra)
if op.Args == nil {
fmt.Fprintf(out, "- Pops: _None_\n")
@@ -148,13 +147,18 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
}
fmt.Fprintf(out, "- %s\n", logic.OpDoc(op.Name))
// if cost changed with versions print all of them
+ costs := logic.OpAllCosts(op.Name)
if len(costs) > 1 {
fmt.Fprintf(out, "- **Cost**:\n")
- for v := 1; v < len(costs); v++ {
- fmt.Fprintf(out, " - %d (LogicSigVersion = %d)\n", costs[v], v)
+ for _, cost := range costs {
+ if cost.From == cost.To {
+ fmt.Fprintf(out, " - %d (LogicSigVersion = %d)\n", cost.Cost, cost.To)
+ } else {
+ fmt.Fprintf(out, " - %d (%d <= LogicSigVersion <= %d)\n", cost.Cost, cost.From, cost.To)
+ }
}
} else {
- cost := costs[0]
+ cost := costs[0].Cost
if cost != 1 {
fmt.Fprintf(out, "- **Cost**: %d\n", cost)
}
@@ -221,13 +225,13 @@ type LanguageSpec struct {
}
func argEnum(name string) []string {
- if name == "txn" || name == "gtxn" {
+ if name == "txn" || name == "gtxn" || name == "gtxns" {
return logic.TxnFieldNames
}
if name == "global" {
return logic.GlobalFieldNames
}
- if name == "txna" || name == "gtxna" {
+ if name == "txna" || name == "gtxna" || name == "gtxnsa" {
return logic.TxnaFieldNames
}
if name == "asset_holding_get" {
@@ -262,13 +266,13 @@ func typeString(types []logic.StackType) string {
}
func argEnumTypes(name string) string {
- if name == "txn" || name == "gtxn" {
+ if name == "txn" || name == "gtxn" || name == "gtxns" {
return typeString(logic.TxnFieldTypes)
}
if name == "global" {
return typeString(logic.GlobalFieldTypes)
}
- if name == "txna" || name == "gtxna" {
+ if name == "txna" || name == "gtxna" || name == "gtxnsa" {
return typeString(logic.TxnaFieldTypes)
}
if name == "asset_holding_get" {
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index c1104d5d6..ed834f4ad 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -66,6 +66,7 @@ var appProgGlobKeys uint32
var appProgLocalKeys uint32
var duration uint32
var rekey bool
+var nftAsaPerSecond uint32
func init() {
rootCmd.AddCommand(runCmd)
@@ -105,6 +106,7 @@ func init() {
runCmd.Flags().BoolVar(&randomLease, "randomlease", false, "set the lease to contain a random value")
runCmd.Flags().BoolVar(&rekey, "rekey", false, "Create RekeyTo transactions. Requires groupsize=2 and any of random flags exc random dst")
runCmd.Flags().Uint32Var(&duration, "duration", 0, "The number of seconds to run the pingpong test, forever if 0")
+ runCmd.Flags().Uint32Var(&nftAsaPerSecond, "nftasapersecond", 0, "The number of NFT-style ASAs to create per second")
}
@@ -302,11 +304,15 @@ var runCmd = &cobra.Command{
}
}
+ cfg.NftAsaPerSecond = nftAsaPerSecond
+
reportInfof("Preparing to initialize PingPong with config:\n")
cfg.Dump(os.Stdout)
+ pps := pingpong.NewPingpong(cfg)
+
// Initialize accounts if necessary
- accounts, cinfo, cfg, err := pingpong.PrepareAccounts(ac, cfg)
+ err = pps.PrepareAccounts(ac)
if err != nil {
reportErrorf("Error preparing accounts for transfers: %v\n", err)
}
@@ -319,7 +325,7 @@ var runCmd = &cobra.Command{
cfg.Dump(os.Stdout)
// Kick off the real processing
- pingpong.RunPingPong(context.Background(), ac, accounts, cinfo, cfg)
+ pps.RunPingPong(context.Background(), ac)
},
}
diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go
index 0462ed119..8919029a4 100644
--- a/cmd/tealdbg/cdtState.go
+++ b/cmd/tealdbg/cdtState.go
@@ -362,7 +362,9 @@ func prepareTxn(txn *transactions.Transaction, groupIndex int) []fieldDesc {
for field, name := range logic.TxnFieldNames {
if field == int(logic.FirstValidTime) ||
field == int(logic.Accounts) ||
- field == int(logic.ApplicationArgs) {
+ field == int(logic.ApplicationArgs) ||
+ field == int(logic.Assets) ||
+ field == int(logic.Applications) {
continue
}
var value string
@@ -716,7 +718,7 @@ func makeTxnImpl(txn *transactions.Transaction, groupIndex int, preview bool) (d
desc = append(desc, makePrimitive(field))
}
- for _, fieldIdx := range []logic.TxnField{logic.ApplicationArgs, logic.Accounts} {
+ for _, fieldIdx := range []logic.TxnField{logic.ApplicationArgs, logic.Accounts, logic.Assets, logic.Applications} {
fieldID := encodeTxnArrayField(groupIndex, int(fieldIdx))
var length int
switch logic.TxnField(fieldIdx) {
@@ -724,6 +726,10 @@ func makeTxnImpl(txn *transactions.Transaction, groupIndex int, preview bool) (d
length = len(txn.Accounts) + 1
case logic.ApplicationArgs:
length = len(txn.ApplicationArgs)
+ case logic.Assets:
+ length = len(txn.ForeignAssets)
+ case logic.Applications:
+ length = len(txn.ForeignApps) + 1
}
field := makeArray(logic.TxnFieldNames[fieldIdx], length, fieldID)
if preview {
@@ -768,6 +774,10 @@ func makeTxnArrayField(s *cdtState, groupIndex int, fieldIdx int) (desc []cdt.Ru
length = len(txn.Accounts) + 1
case logic.ApplicationArgs:
length = len(txn.ApplicationArgs)
+ case logic.Assets:
+ length = len(txn.ForeignAssets)
+ case logic.Applications:
+ length = len(txn.ForeignApps) + 1
}
elems := txnFieldToArrayFieldDesc(&txn, groupIndex, logic.TxnField(fieldIdx), length)
diff --git a/cmd/tealdbg/dryrunRequest.go b/cmd/tealdbg/dryrunRequest.go
index 760572528..2963372fb 100644
--- a/cmd/tealdbg/dryrunRequest.go
+++ b/cmd/tealdbg/dryrunRequest.go
@@ -17,6 +17,8 @@
package main
import (
+ "log"
+
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
@@ -31,11 +33,15 @@ func ddrFromParams(dp *DebugParams) (ddr v2.DryrunRequest, err error) {
}
var gdr generatedV2.DryrunRequest
- err = protocol.DecodeJSON(dp.DdrBlob, &gdr)
- if err == nil {
+ err1 := protocol.DecodeJSON(dp.DdrBlob, &gdr)
+ if err1 == nil {
ddr, err = v2.DryrunRequestFromGenerated(&gdr)
} else {
err = protocol.DecodeReflect(dp.DdrBlob, &ddr)
+ // if failed report intermediate decoding error
+ if err != nil {
+ log.Printf("Decoding as JSON DryrunRequest object failed: %s", err1.Error())
+ }
}
return
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 338a25cf5..f8edad06d 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -60,15 +60,15 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err
// 1. Attempt json - a single transaction
var txn transactions.SignedTxn
- err = protocol.DecodeJSON(data, &txn)
- if err == nil {
+ err1 := protocol.DecodeJSON(data, &txn)
+ if err1 == nil {
txnGroup = append(txnGroup, txn)
return
}
// 2. Attempt json - array of transactions
- err = protocol.DecodeJSON(data, &txnGroup)
- if err == nil {
+ err2 := protocol.DecodeJSON(data, &txnGroup)
+ if err2 == nil {
return
}
@@ -87,6 +87,16 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err
txnGroup = append(txnGroup, txn)
}
+ // if conversion failed report all intermediate decoding errors
+ if err != nil {
+ if err1 != nil {
+ log.Printf("Decoding as JSON txn failed: %s", err1.Error())
+ }
+ if err2 != nil {
+ log.Printf("Decoding as JSON txn group failed: %s", err2.Error())
+ }
+ }
+
return
}
@@ -101,19 +111,19 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord,
// 1. Attempt json - a single record
var record basics.BalanceRecord
- err = protocol.DecodeJSON(data, &record)
- if err == nil {
+ err1 := protocol.DecodeJSON(data, &record)
+ if err1 == nil {
records = append(records, record)
return
}
// 2. Attempt json - a array of records
- err = protocol.DecodeJSON(data, &records)
- if err == nil {
+ err2 := protocol.DecodeJSON(data, &records)
+ if err2 == nil {
return
}
- // 2. Attempt msgp - a array of records
+ // 3. Attempt msgp - a array of records
dec := protocol.NewDecoderBytes(data)
for {
var record basics.BalanceRecord
@@ -128,6 +138,16 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord,
records = append(records, record)
}
+ // if conversion failed report all intermediate decoding errors
+ if err != nil {
+ if err1 != nil {
+ log.Printf("Decoding as JSON record failed: %s", err1.Error())
+ }
+ if err2 != nil {
+ log.Printf("Decoding as JSON array of records failed: %s", err2.Error())
+ }
+ }
+
return
}
@@ -335,7 +355,10 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) {
r.runs[i].program = data
if IsTextFile(data) {
source := string(data)
- ops, err := logic.AssembleStringWithVersion(source, r.proto.LogicSigVersion)
+ ops, err := logic.AssembleString(source)
+ if ops.Version > r.proto.LogicSigVersion {
+ return fmt.Errorf("Program version (%d) is beyond the maximum supported protocol version (%d)", ops.Version, r.proto.LogicSigVersion)
+ }
if err != nil {
errorLines := ""
for _, lineError := range ops.Errors {
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index 4a93825c5..6a844963e 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -337,7 +337,8 @@ func TestDebugEnvironment(t *testing.T) {
// create sample programs that checks all the environment:
// transaction fields, global properties,
- source := `global Round
+ source := `#pragma version 2
+global Round
int 222
==
global LatestTimestamp
@@ -476,7 +477,8 @@ int 100
a.True(pass)
// check relaxed - opted in for both
- source = `int 1
+ source = `#pragma version 2
+int 1
int 100
app_opted_in
int 1
@@ -499,7 +501,7 @@ int 1
ds.Painless = false
// check ForeignApp
- source = `
+ source = `#pragma version 2
int 300
byte 0x676b657962797465 // gkeybyte
app_global_get_ex
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index 96763d3ab..6a1004c27 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -135,6 +135,7 @@ var roundNumber uint64
var timestamp int64
var runMode runModeValue = runModeValue{makeCobraStringValue("auto", []string{"signature", "application"})}
var port int
+var iface string
var noFirstRun bool
var noBrowserCheck bool
var noSourceMap bool
@@ -146,6 +147,7 @@ var listenForDrReq bool
func init() {
rootCmd.PersistentFlags().VarP(&frontend, "frontend", "f", "Frontend to use: "+frontend.AllowedString())
rootCmd.PersistentFlags().IntVar(&port, "remote-debugging-port", 9392, "Port to listen on")
+ rootCmd.PersistentFlags().StringVar(&iface, "listen", "127.0.0.1", "Network interface to listen on")
rootCmd.PersistentFlags().BoolVar(&noFirstRun, "no-first-run", false, "")
rootCmd.PersistentFlags().MarkHidden("no-first-run")
rootCmd.PersistentFlags().BoolVar(&noBrowserCheck, "no-default-browser-check", false, "")
@@ -153,7 +155,7 @@ func init() {
rootCmd.PersistentFlags().BoolVar(&noSourceMap, "no-source-map", false, "Do not generate source maps")
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output")
- debugCmd.Flags().StringVarP(&proto, "proto", "p", "", "Consensus protocol version for TEAL")
+ debugCmd.Flags().StringVarP(&proto, "proto", "p", "", "Consensus protocol version for TEAL evaluation")
debugCmd.Flags().StringVarP(&txnFile, "txn", "t", "", "Transaction(s) to evaluate TEAL on in form of json or msgpack file")
debugCmd.Flags().IntVarP(&groupIndex, "group-index", "g", 0, "Transaction index in a txn group")
debugCmd.Flags().StringVarP(&balanceFile, "balance", "b", "", "Balance records to evaluate stateful TEAL on in form of json or msgpack file")
@@ -172,7 +174,7 @@ func init() {
}
func debugRemote() {
- ds := makeDebugServer(port, &frontend, nil)
+ ds := makeDebugServer(iface, port, &frontend, nil)
err := ds.startRemote()
if err != nil {
log.Fatalln(err.Error())
@@ -277,7 +279,7 @@ func debugLocal(args []string) {
ListenForDrReq: listenForDrReq,
}
- ds := makeDebugServer(port, &frontend, &dp)
+ ds := makeDebugServer(iface, port, &frontend, &dp)
err = ds.startDebug()
if err != nil {
diff --git a/cmd/tealdbg/server.go b/cmd/tealdbg/server.go
index c47208a05..f20728594 100644
--- a/cmd/tealdbg/server.go
+++ b/cmd/tealdbg/server.go
@@ -90,11 +90,11 @@ type FrontendFactory interface {
Make(router *mux.Router, appAddress string) (da DebugAdapter)
}
-func makeDebugServer(port int, ff FrontendFactory, dp *DebugParams) DebugServer {
+func makeDebugServer(iface string, port int, ff FrontendFactory, dp *DebugParams) DebugServer {
debugger := MakeDebugger()
router := mux.NewRouter()
- appAddress := fmt.Sprintf("127.0.0.1:%d", port)
+ appAddress := fmt.Sprintf("%s:%d", iface, port)
da := ff.Make(router, appAddress)
debugger.AddAdapter(da)
diff --git a/cmd/tealdbg/server_test.go b/cmd/tealdbg/server_test.go
index 99ff96af7..aa138b30b 100644
--- a/cmd/tealdbg/server_test.go
+++ b/cmd/tealdbg/server_test.go
@@ -111,7 +111,7 @@ func serverTestImpl(t *testing.T, run func(t *testing.T, ds *DebugServer) bool,
var ds DebugServer
for attempt < 5 && !started {
port = rand.Intn(maxPortNum-minPortNum) + minPortNum
- ds = makeDebugServer(port, &mockFactory{}, dp)
+ ds = makeDebugServer("127.0.0.1", port, &mockFactory{}, dp)
started = run(t, &ds)
attempt++
}
diff --git a/cmd/updater/systemd-setup.sh b/cmd/updater/systemd-setup.sh
index e0ff78862..bad874513 100755
--- a/cmd/updater/systemd-setup.sh
+++ b/cmd/updater/systemd-setup.sh
@@ -17,7 +17,7 @@ setup_root() {
systemctl daemon-reload
}
-if [ "$#" != 2 ]; then
+if [ "$#" != 2 ] && [ "$#" != 3 ]; then
echo "Usage: $0 username group [bindir]"
exit 1
fi
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index 23693f4a7..136ce67d5 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -367,17 +367,23 @@ function run_systemd_action() {
local data_dir=$2
local process_owner
+ # If the service is system-level, check if it's root or sudo
if check_service system "$data_dir"; then
process_owner=$(awk '{ print $1 }' <(ps aux | grep "[a]lgod -d ${data_dir}"))
- if $IS_ROOT || grep sudo <(groups "$process_owner" &> /dev/null); then
+ if $IS_ROOT; then
if systemctl "$action" "algorand@$(systemd-escape "$data_dir")"; then
echo "systemd system service: $action"
return 0
fi
+ elif grep sudo <(groups "$process_owner") &> /dev/null; then
+ if sudo -n systemctl "$action" "algorand@$(systemd-escape "$data_dir")"; then
+ echo "sudo -n systemd system service: $action"
+ return 0
+ fi
fi
- fi
- if check_service user "$data_dir"; then
+ # If the service is user-level then run systemctl --user
+ elif check_service user "$data_dir"; then
if systemctl --user "$action" "algorand@$(systemd-escape "${data_dir}")"; then
echo "systemd user service: $action"
return 0
@@ -387,37 +393,13 @@ function run_systemd_action() {
return 1
}
-function shutdown_node() {
- echo Stopping node...
- if [ "$(pgrep -x algod)" != "" ] || [ "$(pgrep -x kmd)" != "" ] ; then
- if [ -f "${BINDIR}/goal" ]; then
- for DD in "${DATADIRS[@]}"; do
- if [ -f "${DD}/algod.pid" ] || [ -f "${DD}"/**/kmd.pid ] ; then
- echo "Stopping node with data directory ${DD} and waiting..."
- run_systemd_action stop "${DD}"
- "${BINDIR}/goal" node stop -d "${DD}"
- sleep 5
- else
- echo "Node is running but not in ${DD} - not stopping"
- # Clean up zombie (algod|kmd).net files
- rm -f "${DD}/algod.net" "${DD}"/**/kmd.net
- fi
- done
- fi
- else
- echo ... node not running
- fi
-
- RESTART_NODE=1
-}
-
function backup_binaries() {
echo Backing up current binary files...
mkdir -p "${BINDIR}/backup"
BACKUPFILES="algod kmd carpenter doberman goal update.sh updater diagcfg"
# add node_exporter to the files list we're going to backup, but only we if had it previously deployed.
[ -f "${BINDIR}/node_exporter" ] && BACKUPFILES="${BACKUPFILES} node_exporter"
- tar -zcf "${BINDIR}/backup/bin-v${CURRENTVER}.tar.gz" -C "${BINDIR}" "${BACKUPFILES}" >/dev/null 2>&1
+ tar -zcf "${BINDIR}/backup/bin-v${CURRENTVER}.tar.gz" -C "${BINDIR}" ${BACKUPFILES} >/dev/null 2>&1
}
function backup_data() {
@@ -427,12 +409,12 @@ function backup_data() {
echo "Backing up current data files from ${CURDATADIR}..."
mkdir -p "${BACKUPDIR}"
BACKUPFILES="genesis.json wallet-genesis.id"
- tar --no-recursion --exclude='*.log' --exclude='*.log.archive' --exclude='*.tar.gz' -zcf "${BACKUPDIR}/data-v${CURRENTVER}.tar.gz" -C "${CURDATADIR}" "${BACKUPFILES}" >/dev/null 2>&1
+ tar --no-recursion --exclude='*.log' --exclude='*.log.archive' --exclude='*.tar.gz' -zcf "${BACKUPDIR}/data-v${CURRENTVER}.tar.gz" -C "${CURDATADIR}" ${BACKUPFILES} >/dev/null 2>&1
}
function backup_current_version() {
backup_binaries
- for DD in "${DATADIRS[@]}"; do
+ for DD in ${DATADIRS[@]}; do
backup_data "${DD}"
done
}
@@ -562,16 +544,16 @@ function startup_node() {
fi
CURDATADIR=$1
- echo Starting node in ${CURDATADIR}...
+ echo Restarting node in ${CURDATADIR}...
check_install_valid
if [ $? -ne 0 ]; then
fail_and_exit "Installation does not appear to be valid"
fi
- if ! run_systemd_action start "${CURDATADIR}"; then
- echo "No systemd services, starting node with goal."
- ${BINDIR}/goal node start -d "${CURDATADIR}" ${HOSTEDFLAG}
+ if ! run_systemd_action restart "${CURDATADIR}"; then
+ echo "No systemd services, restarting node with goal."
+ ${BINDIR}/goal node restart -d "${CURDATADIR}" ${HOSTEDFLAG}
fi
}
@@ -611,7 +593,7 @@ function apply_fixups() {
# Delete obsolete algorand binary - renamed to 'goal'
rm "${BINDIR}/algorand" >/dev/null 2>&1
- for DD in "${DATADIRS[@]}"; do
+ for DD in ${DATADIRS[@]}; do
clean_legacy_logs "${DD}"
# Purge obsolete cadaver files (now agreement.cdv[.archive])
@@ -679,8 +661,8 @@ else
determine_current_version
fi
-# Shutdown node before backing up so data is consistent and files aren't locked / in-use.
-shutdown_node
+# Any fail_and_exit beyond this point will run a restart
+RESTART_NODE=1
if ! $DRYRUN; then
if [ ${SKIP_UPDATE} -eq 0 ]; then
@@ -695,7 +677,7 @@ if ! $DRYRUN; then
fail_and_exit "Error installing new files"
fi
- for DD in "${DATADIRS[@]}"; do
+ for DD in ${DATADIRS[@]}; do
if ! install_new_data "${DD}"; then
fail_and_exit "Error installing data files into ${DD}"
fi
@@ -703,7 +685,7 @@ if ! $DRYRUN; then
copy_genesis_files
- for DD in "${DATADIRS[@]}"; do
+ for DD in ${DATADIRS[@]}; do
if ! check_for_new_ledger "${DD}"; then
fail_and_exit "Error updating ledger in ${DD}"
fi
diff --git a/compactcert/abstractions.go b/compactcert/abstractions.go
index faa51fd6a..6a369dee1 100644
--- a/compactcert/abstractions.go
+++ b/compactcert/abstractions.go
@@ -54,5 +54,5 @@ type Network interface {
// Accounts captures the aspects of the AccountManager that are used by
// this package.
type Accounts interface {
- Keys() []account.Participation
+ Keys(basics.Round) []account.Participation
}
diff --git a/compactcert/builder.go b/compactcert/builder.go
index 27b2c0117..f6adbfafb 100644
--- a/compactcert/builder.go
+++ b/compactcert/builder.go
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
)
@@ -321,7 +322,7 @@ func (ccw *Worker) tryBuilding() {
for rnd, b := range ccw.builders {
firstValid := ccw.ledger.Latest() + 1
- acceptableWeight := ledger.AcceptableCompactCertWeight(b.votersHdr, firstValid)
+ acceptableWeight := ledger.AcceptableCompactCertWeight(b.votersHdr, firstValid, logging.Base())
if b.SignedWeight() < acceptableWeight {
// Haven't signed enough to build the cert at this time..
continue
diff --git a/compactcert/signer.go b/compactcert/signer.go
index ef2ec67e0..767211130 100644
--- a/compactcert/signer.go
+++ b/compactcert/signer.go
@@ -93,17 +93,17 @@ func (ccw *Worker) signBlock(hdr bookkeeping.BlockHeader) {
return
}
- keys := ccw.accts.Keys()
- if len(keys) == 0 {
- // No keys, nothing to do.
- return
- }
-
// Compact cert gets signed by the next round after the block,
// because by the time agreement is reached on the block,
// ephemeral keys for that round could be deleted.
sigKeyRound := hdr.Round + 1
+ keys := ccw.accts.Keys(sigKeyRound)
+ if len(keys) == 0 {
+ // No keys, nothing to do.
+ return
+ }
+
// votersRound is the round containing the merkle root commitment
// for the voters that are going to sign this block.
votersRound := hdr.Round.SubSaturate(basics.Round(proto.CompactCertRounds))
diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go
index a2849120c..cd3351e82 100644
--- a/compactcert/worker_test.go
+++ b/compactcert/worker_test.go
@@ -96,8 +96,13 @@ func (s *testWorkerStubs) addBlock(ccNextRound basics.Round) {
}
}
-func (s *testWorkerStubs) Keys() []account.Participation {
- return s.keys
+func (s *testWorkerStubs) Keys(rnd basics.Round) (out []account.Participation) {
+ for _, part := range s.keys {
+ if part.OverlapsInterval(rnd, rnd) {
+ out = append(out, part)
+ }
+ }
+ return
}
func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
@@ -203,7 +208,8 @@ func newPartKey(t testing.TB, parent basics.Address) account.Participation {
part, err := account.FillDBWithParticipationKeys(partDB, parent, 0, 1024*1024, config.Consensus[protocol.ConsensusFuture].DefaultKeyDilution)
require.NoError(t, err)
- return part
+ part.Close()
+ return part.Participation
}
func TestWorkerAllSigs(t *testing.T) {
diff --git a/config/config.go b/config/config.go
index 4c4934681..0ab143b89 100644
--- a/config/config.go
+++ b/config/config.go
@@ -63,7 +63,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitally, otherwise, only the most recents blocks
@@ -372,6 +372,52 @@ type Local struct {
// On networks that doesn't have archive servers, this becomes a no-op, as the catchup service would have no
// archive server to pick from, and therefore automatically selects one of the relay nodes.
EnableCatchupFromArchiveServers bool `version[15]:"false"`
+
+ // DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for
+ // connections that are originating from the local machine. Setting this to "true" allows creating large
+ // local-machine networks that won't trip the incoming connection limit observed by relays.
+ DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
+
+ // BlockServiceCustomFallbackEndpoints is a comma delimited list of endpoints which the block service uses to
+ // redirect the http requests to in case it does not have the round. If it is not specified, will check
+ // EnableBlockServiceFallbackToArchiver.
+ BlockServiceCustomFallbackEndpoints string `version[16]:""`
+
+ // EnableBlockServiceFallbackToArchiver controls whether the block service redirects the http requests to
+ // an archiver or returns StatusNotFound (404) when it does not have the requested round, and
+ // BlockServiceCustomFallbackEndpoints is empty.
+ // The archiver is randomly selected, if none is available, will return StatusNotFound (404).
+ EnableBlockServiceFallbackToArchiver bool `version[16]:"true"`
+
+ // CatchupBlockValidateMode is a development and testing configuration used by the catchup service.
+ // It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation.
+ // This field is a bit-field with:
+ // bit 0: (default 0) 0: verify the block certificate; 1: skip this validation
+ // bit 1: (default 0) 0: verify payset committed hash in block header matches payset hash; 1: skip this validation
+ // bit 2: (default 0) 0: don't verify the transaction signatures on the block are valid; 1: verify the transaction signatures on block
+ // bit 3: (default 0) 0: don't verify that the hash of the recomputed payset matches the hash of the payset committed in the block header; 1: do perform the above verification
+ // Note: not all permutations of the above bitset are currently functional. In particular, the ones that are functional are:
+ // 0 : default behavior.
+ // 3 : speed up catchup by skipping necessary validations
+ // 12 : perform all validation methods (normal and additional). These extra tests help to verify the integrity of the compiled executable against
+ // previously used executables, and would not provide any additional security guarantees.
+ CatchupBlockValidateMode int `version[16]:"0"`
+
+ // Generate AccountUpdates telemetry event
+ EnableAccountUpdatesStats bool `version[16]:"false"`
+
+ // Time interval in nanoseconds for generating accountUpdates telemetry event
+ AccountUpdatesStatsInterval time.Duration `version[16]:"5000000000"`
+
+ // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation
+ // keys have been placed on the genesis directory.
+ ParticipationKeysRefreshInterval time.Duration `version[16]:"60000000000"`
+
+ // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful
+ // when we have a single-node private network, where there is no other nodes that need to be communicated with.
+ // features like catchpoint catchup would be rendered completely non-operational, and many of the node's inner
+ // workings would be completely dysfunctional.
+ DisableNetworking bool `version[16]:"false"`
}
// Filenames of config files within the configdir (e.g. ~/.algorand)
@@ -620,3 +666,30 @@ func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
// ProposalAssemblyTime is the max amount of time to spend on generating a proposal block. This should eventually have it's own configurable value.
const ProposalAssemblyTime time.Duration = 250 * time.Millisecond
+
+const (
+ catchupValidationModeCertificate = 1
+ catchupValidationModePaysetHash = 2
+ catchupValidationModeVerifyTransactionSignatures = 4
+ catchupValidationModeVerifyApplyData = 8
+)
+
+// CatchupVerifyCertificate returns true if certificate verification is needed
+func (cfg Local) CatchupVerifyCertificate() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0
+}
+
+// CatchupVerifyPaysetHash returns true if payset hash verification is needed
+func (cfg Local) CatchupVerifyPaysetHash() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModePaysetHash == 0
+}
+
+// CatchupVerifyTransactionSignatures returns true if transactions signature verification is needed
+func (cfg Local) CatchupVerifyTransactionSignatures() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyTransactionSignatures != 0
+}
+
+// CatchupVerifyApplyData returns true if verifying the ApplyData of the payset needed
+func (cfg Local) CatchupVerifyApplyData() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyApplyData != 0
+}
diff --git a/config/consensus.go b/config/consensus.go
index bf1431c49..544c73633 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -351,6 +351,15 @@ type ConsensusParams struct {
// NoEmptyLocalDeltas updates how ApplyDelta.EvalDelta.LocalDeltas are stored
NoEmptyLocalDeltas bool
+
+ // EnableKeyregCoherencyCheck enable the following extra checks on key registration transactions:
+ // 1. checking that [VotePK/SelectionPK/VoteKeyDilution] are all set or all clear.
+ // 2. checking that the VoteFirst is less or equal to VoteLast.
+ // 3. checking that in the case of going offline, both the VoteFirst and VoteLast are clear.
+ // 4. checking that in the case of going online the VoteLast is non-zero and greater than the current network round.
+ // 5. checking that in the case of going online the VoteFirst is less or equal to the LastValid+1.
+ // 6. checking that in the case of going online the VoteFirst is less or equal to the next network round.
+ EnableKeyregCoherencyCheck bool
}
// PaysetCommitType enumerates possible ways for the block header to commit to
@@ -905,11 +914,16 @@ func initConsensusProtocols() {
vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
vFuture.CompactCertSecKQ = 128
+ vFuture.EnableKeyregCoherencyCheck = true
+
// enable the InitialRewardsRateCalculation fix
vFuture.InitialRewardsRateCalculation = true
// Enable transaction Merkle tree.
vFuture.PaysetCommit = PaysetCommitMerkle
+ // Enable TEAL 4
+ vFuture.LogicSigVersion = 4
+
Consensus[protocol.ConsensusFuture] = vFuture
}
diff --git a/config/local_defaults.go b/config/local_defaults.go
index c62985f30..725409e73 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,17 +20,20 @@
package config
var defaultLocal = Local{
- Version: 15,
+ Version: 16,
+ AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AnnounceParticipationKey: true,
Archival: false,
BaseLoggerDebugLevel: 4,
+ BlockServiceCustomFallbackEndpoints: "",
BroadcastConnectionsLimit: -1,
CadaverSizeTarget: 1073741824,
CatchpointFileHistoryLength: 365,
CatchpointInterval: 10000,
CatchpointTracking: 0,
CatchupBlockDownloadRetryAttempts: 1000,
+ CatchupBlockValidateMode: 0,
CatchupFailurePeerRefreshRate: 10,
CatchupGossipBlockFetchTimeoutSec: 4,
CatchupHTTPBlockFetchTimeoutSec: 4,
@@ -41,11 +44,15 @@ var defaultLocal = Local{
DNSBootstrapID: "<network>.algorand.network",
DNSSecurityFlags: 1,
DeadlockDetection: 0,
+ DisableLocalhostConnectionRateLimit: true,
+ DisableNetworking: false,
DisableOutgoingConnectionThrottling: false,
+ EnableAccountUpdatesStats: false,
EnableAgreementReporting: false,
EnableAgreementTimeMetrics: false,
EnableAssembleStats: false,
EnableBlockService: false,
+ EnableBlockServiceFallbackToArchiver: true,
EnableCatchupFromArchiveServers: false,
EnableDeveloperAPI: false,
EnableGossipBlockService: true,
@@ -81,6 +88,7 @@ var defaultLocal = Local{
OptimizeAccountsDatabaseOnStartup: false,
OutgoingMessageFilterBucketCount: 3,
OutgoingMessageFilterBucketSize: 128,
+ ParticipationKeysRefreshInterval: 60000000000,
PeerConnectionsUpdateInterval: 3600,
PeerPingPeriodSeconds: 0,
PriorityPeers: map[string]bool{},
diff --git a/config/version.go b/config/version.go
index 57209d55a..85478a928 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 2
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 5
+const VersionMinor = 6
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/merkletrie/cache_test.go b/crypto/merkletrie/cache_test.go
index 8b50d8ea7..9d35f92aa 100644
--- a/crypto/merkletrie/cache_test.go
+++ b/crypto/merkletrie/cache_test.go
@@ -474,7 +474,7 @@ func TestCacheLoadingDeferedPage(t *testing.T) {
require.NoError(t, err)
// verify that the cache doesn't reset the mtc.deferedPageLoad on loading a non-defered page.
- dupMem := memoryCommitter1.Duplicate()
+ dupMem := memoryCommitter1.Duplicate(false)
mt2, _ := MakeTrie(dupMem, defaultTestMemoryConfig)
lastPage := int64(mt2.nextNodeID) / defaultTestMemoryConfig.NodesCountPerPage
require.Equal(t, uint64(lastPage), mt2.cache.deferedPageLoad)
diff --git a/crypto/merkletrie/committer_test.go b/crypto/merkletrie/committer_test.go
index c9eb54091..c82a6f915 100644
--- a/crypto/merkletrie/committer_test.go
+++ b/crypto/merkletrie/committer_test.go
@@ -26,10 +26,16 @@ import (
)
// Duplicate duplicates the current memory committer.
-func (mc *InMemoryCommitter) Duplicate() (out *InMemoryCommitter) {
+func (mc *InMemoryCommitter) Duplicate(flat bool) (out *InMemoryCommitter) {
out = &InMemoryCommitter{memStore: make(map[uint64][]byte)}
for k, v := range mc.memStore {
- out.memStore[k] = v
+ if flat {
+ out.memStore[k] = v
+ } else {
+ bytes := make([]byte, len(v))
+ copy(bytes[:], v[:])
+ out.memStore[k] = bytes
+ }
}
return
}
@@ -53,7 +59,7 @@ func TestInMemoryCommitter(t *testing.T) {
}
releasedNodes, err := mt1.Evict(true)
require.NoError(t, err)
- savedMemoryCommitter := memoryCommitter.Duplicate()
+ savedMemoryCommitter := memoryCommitter.Duplicate(false)
require.Equal(t, 19282, releasedNodes)
for i := len(hashes) / 2; i < len(hashes); i++ {
mt1.Add(hashes[i][:])
@@ -78,8 +84,8 @@ func TestInMemoryCommitter(t *testing.T) {
}
require.Equal(t, 2425675, storageSize) // 2,425,575 / 50,000 ~= 48 bytes/leaf.
stats, _ := mt1.GetStats()
- require.Equal(t, leafsCount, int(stats.leafCount))
- require.Equal(t, 61926, int(stats.nodesCount))
+ require.Equal(t, leafsCount, int(stats.LeafCount))
+ require.Equal(t, 61926, int(stats.NodesCount))
}
@@ -123,8 +129,8 @@ func TestNoRedundentPages(t *testing.T) {
}
}
stats, _ := mt1.GetStats()
- require.Equal(t, testSize, int(stats.leafCount))
- nodesCount := int(stats.nodesCount)
+ require.Equal(t, testSize, int(stats.LeafCount))
+ nodesCount := int(stats.NodesCount)
require.Equal(t, nodesCount, len(trieNodes))
require.Equal(t, nodesCount, mt1.cache.cachedNodeCount)
}
@@ -187,3 +193,51 @@ func TestMultipleCommits(t *testing.T) {
}
require.Equal(t, storageSize1, storageSize2)
}
+
+func TestIterativeCommits(t *testing.T) {
+ testSize := 1000
+
+ memConfig := MemoryConfig{
+ NodesCountPerPage: 116,
+ CachedNodesCount: 9000,
+ PageFillFactor: 0.95,
+ MaxChildrenPagesThreshold: 64,
+ }
+
+ hashes := make([]crypto.Digest, testSize)
+ for i := 0; i < len(hashes); i++ {
+ hashes[i] = crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(0), byte(0)})
+ }
+
+ // initialize memory container.
+ mc := &InMemoryCommitter{}
+ mt, _ := MakeTrie(mc, memConfig)
+ for i := 0; i < len(hashes); i++ {
+ added, err := mt.Add(hashes[i][:])
+ require.True(t, added)
+ require.NoError(t, err)
+ }
+ _, err := mt.Commit()
+ require.NoError(t, err)
+
+ for r := 0; r < 100; r++ {
+ newMC := mc.Duplicate(true)
+ mt, _ = MakeTrie(newMC, memConfig)
+ mc = newMC
+
+ for k := r * 5; k < r*7+len(hashes); k++ {
+ i := k % len(hashes)
+ deleted, err := mt.Delete(hashes[i][:])
+ require.True(t, deleted)
+ require.NoError(t, err)
+ hashes[i] = crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(r + 1), byte((r + 1) >> 8)})
+ added, err := mt.Add(hashes[i][:])
+ require.True(t, added)
+ require.NoError(t, err)
+ }
+ _, err := mt.Commit()
+ require.NoError(t, err)
+ mt = nil
+
+ }
+}
diff --git a/crypto/merkletrie/node.go b/crypto/merkletrie/node.go
index a934426b4..ac9605e66 100644
--- a/crypto/merkletrie/node.go
+++ b/crypto/merkletrie/node.go
@@ -47,16 +47,16 @@ var childEntrySize int = int(unsafe.Sizeof(childEntry{}))
// stats recursively update the provided Stats structure with the current node information
func (n *node) stats(cache *merkleTrieCache, stats *Stats, depth int) (err error) {
- stats.nodesCount++
+ stats.NodesCount++
if n.leaf() {
- stats.leafCount++
- if depth > stats.depth {
- stats.depth = depth
+ stats.LeafCount++
+ if depth > stats.Depth {
+ stats.Depth = depth
}
- stats.size += sliceSize + len(n.hash) + bitsetSize
+ stats.Size += sliceSize + len(n.hash) + bitsetSize
return nil
}
- stats.size += sliceSize + len(n.hash) + sliceSize + len(n.children)*childEntrySize + bitsetSize
+ stats.Size += sliceSize + len(n.hash) + sliceSize + len(n.children)*childEntrySize + bitsetSize
for _, child := range n.children {
childNode, err := cache.getNode(child.id)
if err != nil {
diff --git a/crypto/merkletrie/trie.go b/crypto/merkletrie/trie.go
index 2efd76d89..9cb855ccc 100644
--- a/crypto/merkletrie/trie.go
+++ b/crypto/merkletrie/trie.go
@@ -69,10 +69,10 @@ type Trie struct {
// Stats structure is a helper for finding underlaying statistics about the trie
type Stats struct {
- nodesCount uint
- leafCount uint
- depth int
- size int
+ NodesCount uint
+ LeafCount uint
+ Depth int
+ Size int
}
// MakeTrie creates a merkle trie
diff --git a/crypto/merkletrie/trie_test.go b/crypto/merkletrie/trie_test.go
index 46e23b1fc..120ac9ed3 100644
--- a/crypto/merkletrie/trie_test.go
+++ b/crypto/merkletrie/trie_test.go
@@ -43,16 +43,16 @@ func TestAddingAndRemoving(t *testing.T) {
require.Equal(t, true, addResult)
rootsWhileAdding[i], _ = mt.RootHash()
stats, _ := mt.GetStats()
- require.Equal(t, i+1, int(stats.leafCount))
+ require.Equal(t, i+1, int(stats.LeafCount))
}
stats, _ := mt.GetStats()
- require.Equal(t, len(hashes), int(stats.leafCount))
- require.Equal(t, 4, int(stats.depth))
- require.Equal(t, 10915, int(stats.nodesCount))
- require.Equal(t, 1135745, int(stats.size))
- require.True(t, int(stats.nodesCount) > len(hashes))
- require.True(t, int(stats.nodesCount) < 2*len(hashes))
+ require.Equal(t, len(hashes), int(stats.LeafCount))
+ require.Equal(t, 4, int(stats.Depth))
+ require.Equal(t, 10915, int(stats.NodesCount))
+ require.Equal(t, 1135745, int(stats.Size))
+ require.True(t, int(stats.NodesCount) > len(hashes))
+ require.True(t, int(stats.NodesCount) < 2*len(hashes))
allHashesAddedRoot, _ := mt.RootHash()
@@ -67,8 +67,8 @@ func TestAddingAndRemoving(t *testing.T) {
roothash, _ := mt.RootHash()
require.Equal(t, crypto.Digest{}, roothash)
stats, _ = mt.GetStats()
- require.Equal(t, 0, int(stats.leafCount))
- require.Equal(t, 0, int(stats.depth))
+ require.Equal(t, 0, int(stats.LeafCount))
+ require.Equal(t, 0, int(stats.Depth))
// add the items in a different order.
hashesOrder := rand.New(rand.NewSource(1234567)).Perm(len(hashes))
diff --git a/daemon/algod/api/server/v1/handlers/responses.go b/daemon/algod/api/server/v1/handlers/responses.go
index 74f2e746a..33a8b75f4 100644
--- a/daemon/algod/api/server/v1/handlers/responses.go
+++ b/daemon/algod/api/server/v1/handlers/responses.go
@@ -177,7 +177,7 @@ func (r TransactionParamsResponse) getBody() interface{} {
// RawBlockResponse contains encoded, raw block information
//
-// swagger:response RawBlockResponse
+// swagger:ignore
type RawBlockResponse struct {
// in: body
Body *v1.RawBlock
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index 0a08cc0d0..e1becc775 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -346,8 +346,9 @@ func init() {
// ledger requires proto string and proto params set
var proto config.ConsensusParams
- proto.LogicSigVersion = 2
- proto.LogicSigMaxCost = 1000
+ proto.LogicSigVersion = 4
+ proto.LogicSigMaxCost = 20000
+ proto.MaxAppProgramCost = 700
proto.MaxAppKeyLen = 64
proto.MaxAppBytesValueLen = 64
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index aaed6a930..7a74c6b2a 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"net/http"
"time"
@@ -219,7 +220,7 @@ func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params
return internalError(ctx, err, "generating proof", v2.Log)
}
- var proofconcat []byte
+ proofconcat := make([]byte, 0)
for _, proofelem := range proof {
proofconcat = append(proofconcat, proofelem[:]...)
}
@@ -572,7 +573,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
- txns, err := v2.Node.GetPendingTxnsFromPool()
+ txnPool, err := v2.Node.GetPendingTxnsFromPool()
if err != nil {
return internalError(ctx, err, errFailedLookingUpTransactionPool, v2.Log)
}
@@ -583,11 +584,16 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format
RewardsPool: basics.Address{},
}
+ txnLimit := uint64(math.MaxUint64)
+ if max != nil && *max != 0 {
+ txnLimit = *max
+ }
+
// Convert transactions to msgp / json strings
- txnArray := make([]transactions.SignedTxn, 0)
- for _, txn := range txns {
+ topTxns := make([]transactions.SignedTxn, 0)
+ for _, txn := range txnPool {
// break out if we've reached the max number of transactions
- if max != nil && uint64(len(txnArray)) >= *max {
+ if uint64(len(topTxns)) >= txnLimit {
break
}
@@ -596,7 +602,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format
continue
}
- txnArray = append(txnArray, txn)
+ topTxns = append(topTxns, txn)
}
// Encoding wasn't working well without embedding "real" objects.
@@ -604,8 +610,8 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format
TopTransactions []transactions.SignedTxn `json:"top-transactions"`
TotalTransactions uint64 `json:"total-transactions"`
}{
- TopTransactions: txnArray,
- TotalTransactions: uint64(len(txnArray)),
+ TopTransactions: topTxns,
+ TotalTransactions: uint64(len(txnPool)),
}
data, err := encode(handle, response)
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index dcfd4e9a6..da98d6549 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -189,21 +189,45 @@ func TestPendingTransactionInformation(t *testing.T) {
pendingTransactionInformationTest(t, 0, "bad format", 400)
}
-func getPendingTransactionsTest(t *testing.T, format string, expectedCode int) {
+func getPendingTransactionsTest(t *testing.T, format string, max uint64, expectedCode int) {
handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t)
defer releasefunc()
- params := generatedV2.GetPendingTransactionsParams{Format: &format}
+ params := generatedV2.GetPendingTransactionsParams{Format: &format, Max: &max}
err := handler.GetPendingTransactions(c, params)
require.NoError(t, err)
require.Equal(t, expectedCode, rec.Code)
+ if format == "json" && rec.Code == 200 {
+ var response generatedV2.PendingTransactionsResponse
+
+ data := rec.Body.Bytes()
+ err = protocol.DecodeJSON(data, &response)
+ require.NoError(t, err, string(data))
+
+ if max == 0 || max >= uint64(len(txnPoolGolden)) {
+ // all pending txns should be returned
+ require.Equal(t, uint64(len(response.TopTransactions)), uint64(len(txnPoolGolden)))
+ } else {
+ // only max txns should be returned
+ require.Equal(t, uint64(len(response.TopTransactions)), max)
+ }
+
+ require.Equal(t, response.TotalTransactions, uint64(len(txnPoolGolden)))
+ require.GreaterOrEqual(t, response.TotalTransactions, uint64(len(response.TopTransactions)))
+ }
}
func TestPendingTransactions(t *testing.T) {
t.Parallel()
- getPendingTransactionsTest(t, "json", 200)
- getPendingTransactionsTest(t, "msgpack", 200)
- getPendingTransactionsTest(t, "bad format", 400)
+ getPendingTransactionsTest(t, "json", 0, 200)
+ getPendingTransactionsTest(t, "json", 1, 200)
+ getPendingTransactionsTest(t, "json", 2, 200)
+ getPendingTransactionsTest(t, "json", 3, 200)
+ getPendingTransactionsTest(t, "msgpack", 0, 200)
+ getPendingTransactionsTest(t, "msgpack", 1, 200)
+ getPendingTransactionsTest(t, "msgpack", 2, 200)
+ getPendingTransactionsTest(t, "msgpack", 3, 200)
+ getPendingTransactionsTest(t, "bad format", 0, 400)
}
func pendingTransactionsByAddressTest(t *testing.T, rootkeyToUse int, format string, expectedCode int) {
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index 398788e4e..b5962cf4c 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -74,6 +74,7 @@ var poolAddrResponseGolden = generatedV2.AccountResponse{
AppsTotalSchema: &appsTotalSchema,
CreatedApps: &appCreatedApps,
}
+var txnPoolGolden = make([]transactions.SignedTxn, 2)
// ordinarily mockNode would live in `components/mocks`
// but doing this would create an import cycle, as mockNode needs
@@ -120,7 +121,7 @@ func (m mockNode) GetPendingTransaction(txID transactions.Txid) (res node.TxnWit
}
func (m mockNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) {
- return nil, m.err
+ return txnPoolGolden, m.err
}
func (m mockNode) SuggestedFee() basics.MicroAlgos {
@@ -237,7 +238,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*d
}
roots[i] = root
- parts[i] = part
+ parts[i] = part.Participation
startamt := basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))}
short := root.Address()
@@ -250,6 +251,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*d
data.VoteID = parts[i].VotingSecrets().OneTimeSignatureVerifier
genesis[short] = data
}
+ part.Close()
}
genesis[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)})
diff --git a/daemon/algod/deadlockLogger.go b/daemon/algod/deadlockLogger.go
index b7fdc3890..ed5139592 100644
--- a/daemon/algod/deadlockLogger.go
+++ b/daemon/algod/deadlockLogger.go
@@ -21,48 +21,81 @@ import (
"fmt"
"os"
"runtime"
+ "sync"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/logging"
)
-type dumpLogger struct {
+type deadlockLogger struct {
logging.Logger
*bytes.Buffer
+ bufferSync chan struct{}
+ panic func()
+ reportDeadlock sync.Once
}
-func (logger *dumpLogger) dump() {
- logger.Error(logger.String())
+// Panic is defined here just so we can emulate the usage of the deadlockLogger
+func (logger *deadlockLogger) Panic() {
+ logger.Logger.Panic("potential deadlock detected")
}
-var logger = dumpLogger{Logger: logging.Base(), Buffer: bytes.NewBuffer(make([]byte, 0))}
-
-var deadlockPanic func()
+// Write implements the io.Writer interface, ensuring that the write is synchronized.
+func (logger *deadlockLogger) Write(p []byte) (n int, err error) {
+ logger.bufferSync <- struct{}{}
+ n, err = logger.Buffer.Write(p)
+ <-logger.bufferSync
+ return
+}
-func setupDeadlockLogger() {
- deadlockPanic = func() {
- logger.Panic("potential deadlock detected")
+// captureCallstack captures the callstack and return a byte array of the output.
+func captureCallstack() []byte {
+ // Capture all goroutine stacks
+ var buf []byte
+ bufferSize := 256 * 1024
+ for {
+ buf = make([]byte, bufferSize)
+ if runtime.Stack(buf, true) < bufferSize {
+ break
+ }
+ bufferSize *= 2
}
+ return buf
+}
- deadlock.Opts.LogBuf = logger
- deadlock.Opts.OnPotentialDeadlock = func() {
+// onPotentialDeadlock is the handler to be used by the deadlock library.
+func (logger *deadlockLogger) onPotentialDeadlock() {
+ // The deadlock reporting is done only once; this would prevent recursive deadlock issues.
+ // In practice, once we report the deadlock, we panic and abort anyway, so it won't be an issue.
+ logger.reportDeadlock.Do(func() {
// Capture all goroutine stacks
- var buf []byte
- bufferSize := 256 * 1024
- for {
- buf = make([]byte, bufferSize)
- if runtime.Stack(buf, true) < bufferSize {
- break
- }
- bufferSize *= 2
- }
+ buf := captureCallstack()
- // Run this code in a separate goroutine because it might grab locks.
+ logger.bufferSync <- struct{}{}
+ loggedString := logger.String()
+ <-logger.bufferSync
+
+ fmt.Fprintln(os.Stderr, string(buf))
+
+ // logging the logged string to the logger has to happen in a separate go-routine, since the
+ // logger itself (for instance, the CyclicLogWriter) is using a mutex of its own.
go func() {
- logger.dump()
- fmt.Fprintln(os.Stderr, string(buf))
- deadlockPanic()
+ logger.Error(loggedString)
+ logger.panic()
}()
+ })
+}
+
+func setupDeadlockLogger() *deadlockLogger {
+ logger := &deadlockLogger{
+ Logger: logging.Base(),
+ Buffer: bytes.NewBuffer(make([]byte, 0)),
+ bufferSync: make(chan struct{}, 1),
}
+
+ logger.panic = logger.Panic
+ deadlock.Opts.LogBuf = logger
+ deadlock.Opts.OnPotentialDeadlock = logger.onPotentialDeadlock
+ return logger
}
diff --git a/daemon/algod/deadlock_test.go b/daemon/algod/deadlock_test.go
index b38a16138..8df0baec5 100644
--- a/daemon/algod/deadlock_test.go
+++ b/daemon/algod/deadlock_test.go
@@ -35,10 +35,10 @@ func TestDeadlockLogging(t *testing.T) {
logWriter := logging.MakeCyclicFileWriter(logFn, archiveFn, 65536, time.Hour)
l.SetOutput(logWriter)
- setupDeadlockLogger()
+ logger := setupDeadlockLogger()
deadlockCh := make(chan struct{})
- deadlockPanic = func() {
+ logger.panic = func() {
close(deadlockCh)
}
@@ -55,3 +55,36 @@ func TestDeadlockLogging(t *testing.T) {
_ = <-deadlockCh
}
+
+func TestDeadlockOnPotentialDeadlock(t *testing.T) {
+ logFn := fmt.Sprintf("/tmp/test.%s.%d.log", t.Name(), crypto.RandUint64())
+ archiveFn := fmt.Sprintf("%s.archive", logFn)
+
+ l := logging.Base()
+ logWriter := logging.MakeCyclicFileWriter(logFn, archiveFn, 65536, time.Hour)
+ l.SetOutput(logWriter)
+
+ logger := setupDeadlockLogger()
+
+ deadlockCh := make(chan struct{})
+ logger.panic = func() {
+ close(deadlockCh)
+ }
+
+ defer func() {
+ r := recover()
+ if r != nil {
+ fmt.Printf("Recovered: %v\n", r)
+ }
+ }()
+
+ for linenum := 0; linenum < 10; linenum++ {
+ fmt.Fprintf(logger, "line %d", linenum)
+ }
+ logger.onPotentialDeadlock()
+ for linenum := 10; linenum < 20; linenum++ {
+ fmt.Fprintf(logger, "line %d", linenum)
+ }
+
+ _ = <-deadlockCh
+}
diff --git a/daemon/kmd/lib/kmdapi/requests.go b/daemon/kmd/lib/kmdapi/requests.go
index 0411dddbf..7e18c1219 100644
--- a/daemon/kmd/lib/kmdapi/requests.go
+++ b/daemon/kmd/lib/kmdapi/requests.go
@@ -27,7 +27,7 @@ type APIV1Request interface{} // we need to tell swagger to ignore due to bug (g
// APIV1RequestEnvelope is a common envelope that all API V1 requests must embed
//
-// swagger:ignore
+// swagger:model VersionsRequest
type APIV1RequestEnvelope struct { // we need to tell swagger to ignore due to bug (go-swagger/issues/1436)
_struct struct{} `codec:",omitempty,omitemptyarray"`
}
@@ -165,7 +165,7 @@ type APIV1POSTTransactionSignRequest struct {
// Note: SDK and goal usually generate `SignedTxn` objects
// in that case, the field `txn` / `Transaction` of the
// generated `SignedTxn` object needs to be used
- //
+ //
// swagger:strfmt byte
Transaction []byte `json:"transaction"`
PublicKey crypto.PublicKey `json:"public_key"`
diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go
index 8dd5e6c29..dee75b8a2 100644
--- a/daemon/kmd/lib/kmdapi/responses.go
+++ b/daemon/kmd/lib/kmdapi/responses.go
@@ -54,6 +54,14 @@ type VersionsResponse struct {
Versions []string `json:"versions"`
}
+// Response to `GET /versions`
+// swagger:response VersionsResponse
+type versionsResponse struct {
+ //Versions Response
+ //in:body
+ Body *VersionsResponse
+}
+
// APIV1GETWalletsResponse is the response to `GET /v1/wallets`
// friendly:ListWalletsResponse
type APIV1GETWalletsResponse struct {
@@ -61,6 +69,13 @@ type APIV1GETWalletsResponse struct {
Wallets []APIV1Wallet `json:"wallets"`
}
+// Response to `GET /v1/wallets`
+// swagger:response ListWalletsResponse
+type listWalletsResponse struct {
+ //in: body
+ Body *APIV1GETWalletsResponse
+}
+
// APIV1POSTWalletResponse is the response to `POST /v1/wallet`
// friendly:CreateWalletResponse
type APIV1POSTWalletResponse struct {
@@ -68,6 +83,13 @@ type APIV1POSTWalletResponse struct {
Wallet APIV1Wallet `json:"wallet"`
}
+// Response to `POST /v1/wallet`
+// swagger:response CreateWalletResponse
+type createWalletResponse struct {
+ // in:body
+ Body *APIV1POSTWalletResponse
+}
+
// APIV1POSTWalletInitResponse is the response to `POST /v1/wallet/init`
// friendly:InitWalletHandleTokenResponse
type APIV1POSTWalletInitResponse struct {
@@ -75,12 +97,26 @@ type APIV1POSTWalletInitResponse struct {
WalletHandleToken string `json:"wallet_handle_token"`
}
+// Response to `POST /v1/wallet/init`
+// swagger:response InitWalletHandleTokenResponse
+type initWalletHandleTokenResponse struct {
+ // in:body
+ Body *APIV1POSTWalletInitResponse
+}
+
// APIV1POSTWalletReleaseResponse is the response to `POST /v1/wallet/release`
// friendly:ReleaseWalletHandleTokenResponse
type APIV1POSTWalletReleaseResponse struct {
APIV1ResponseEnvelope
}
+// Response to `POST /v1/wallet/release`
+// swagger:response ReleaseWalletHandleTokenResponse
+type releaseWalletHandleTokenResponse struct {
+ // in:body
+ Body *APIV1POSTWalletReleaseResponse
+}
+
// APIV1POSTWalletRenewResponse is the response to `POST /v1/wallet/renew`
// friendly:RenewWalletHandleTokenResponse
type APIV1POSTWalletRenewResponse struct {
@@ -88,6 +124,13 @@ type APIV1POSTWalletRenewResponse struct {
WalletHandle APIV1WalletHandle `json:"wallet_handle"`
}
+// Response `POST /v1/wallet/renew`
+// swagger:response RenewWalletHandleTokenResponse
+type renewWalletHandleTokenResponse struct {
+ // in:body
+ Body *APIV1POSTWalletRenewResponse
+}
+
// APIV1POSTWalletRenameResponse is the response to `POST /v1/wallet/rename`
// friendly:RenameWalletResponse
type APIV1POSTWalletRenameResponse struct {
@@ -95,6 +138,13 @@ type APIV1POSTWalletRenameResponse struct {
Wallet APIV1Wallet `json:"wallet"`
}
+// Response to `POST /v1/wallet/rename`
+// swagger:response RenameWalletResponse
+type renameWalletResponse struct {
+ // in:body
+ Body *APIV1POSTWalletRenameResponse
+}
+
// APIV1POSTWalletInfoResponse is the response to `POST /v1/wallet/info`
// friendly:WalletInfoResponse
type APIV1POSTWalletInfoResponse struct {
@@ -102,6 +152,13 @@ type APIV1POSTWalletInfoResponse struct {
WalletHandle APIV1WalletHandle `json:"wallet_handle"`
}
+// Response to `POST /v1/wallet/info`
+// swagger:response WalletInfoResponse
+type walletInfoResponse struct {
+ // in:body
+ Body *APIV1POSTWalletInfoResponse
+}
+
// APIV1POSTMasterKeyExportResponse is the reponse to `POST /v1/master-key/export`
// friendly:ExportMasterKeyResponse
type APIV1POSTMasterKeyExportResponse struct {
@@ -109,6 +166,13 @@ type APIV1POSTMasterKeyExportResponse struct {
MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"`
}
+// Response to `POST /v1/master-key/export`
+// swagger:response ExportMasterKeyResponse
+type exportMasterKeyResponse struct {
+ // in:body
+ Body *APIV1POSTMasterKeyExportResponse
+}
+
// APIV1POSTKeyImportResponse is the repsonse to `POST /v1/key/import`
// friendly:ImportKeyResponse
type APIV1POSTKeyImportResponse struct {
@@ -116,6 +180,13 @@ type APIV1POSTKeyImportResponse struct {
Address string `json:"address"`
}
+// Response to `POST /v1/key/import`
+// swagger:response ImportKeyResponse
+type importKeyResponse struct {
+ // in:body
+ Body *APIV1POSTKeyImportResponse
+}
+
// APIV1POSTKeyExportResponse is the reponse to `POST /v1/key/export`
// friendly:ExportKeyResponse
type APIV1POSTKeyExportResponse struct {
@@ -123,6 +194,13 @@ type APIV1POSTKeyExportResponse struct {
PrivateKey APIV1PrivateKey `json:"private_key"`
}
+// Response to `POST /v1/key/export`
+// swagger:response ExportKeyResponse
+type exportKeyResponse struct {
+ // in:body
+ Body *APIV1POSTKeyExportResponse
+}
+
// APIV1POSTKeyResponse is the response to `POST /v1/key`
// friendly:GenerateKeyResponse
type APIV1POSTKeyResponse struct {
@@ -130,12 +208,26 @@ type APIV1POSTKeyResponse struct {
Address string `json:"address"`
}
+// Response to `POST /v1/key`
+// swagger:response GenerateKeyResponse
+type generateKeyResponse struct {
+ // in:body
+ Body *APIV1POSTKeyResponse
+}
+
// APIV1DELETEKeyResponse is the response to `DELETE /v1/key`
// friendly:DeleteKeyResponse
type APIV1DELETEKeyResponse struct {
APIV1ResponseEnvelope
}
+// Response to `DELETE /v1/key`
+// swagger:response DeleteKeyResponse
+type deleteKeyResponse struct {
+ // in:body
+ Body *APIV1DELETEKeyResponse
+}
+
// APIV1POSTKeyListResponse is the response to `POST /v1/key/list`
// friendly:ListKeysResponse
type APIV1POSTKeyListResponse struct {
@@ -143,6 +235,13 @@ type APIV1POSTKeyListResponse struct {
Addresses []string `json:"addresses"`
}
+// Response to `POST /v1/key/list`
+// swagger:response ListKeysResponse
+type listKeysResponse struct {
+	// in:body
+ Body *APIV1POSTKeyListResponse
+}
+
// APIV1POSTTransactionSignResponse is the repsonse to `POST /v1/transaction/sign`
// friendly:SignTransactionResponse
type APIV1POSTTransactionSignResponse struct {
@@ -152,6 +251,13 @@ type APIV1POSTTransactionSignResponse struct {
SignedTransaction []byte `json:"signed_transaction"`
}
+// Response to `POST /v1/transaction/sign`
+// swagger:response SignTransactionResponse
+type signTransactionResponse struct {
+ // in:body
+ Body *APIV1POSTTransactionSignResponse
+}
+
// APIV1POSTProgramSignResponse is the repsonse to `POST /v1/data/sign`
// friendly:SignProgramResponse
type APIV1POSTProgramSignResponse struct {
@@ -161,6 +267,13 @@ type APIV1POSTProgramSignResponse struct {
Signature []byte `json:"sig"`
}
+// Response to `POST /v1/data/sign`
+// swagger:response SignProgramResponse
+type signProgramResponse struct {
+ // in:body
+ Body *APIV1POSTProgramSignResponse
+}
+
// APIV1POSTMultisigListResponse is the response to `POST /v1/multisig/list`
// friendly:ListMultisigResponse
type APIV1POSTMultisigListResponse struct {
@@ -168,6 +281,13 @@ type APIV1POSTMultisigListResponse struct {
Addresses []string `json:"addresses"`
}
+// Response to `POST /v1/multisig/list`
+// swagger:response ListMultisigResponse
+type listMultisigResponse struct {
+ // in:body
+ Body *APIV1POSTMultisigListResponse
+}
+
// APIV1POSTMultisigImportResponse is the response to `POST /v1/multisig/import`
// friendly:ImportMultisigResponse
type APIV1POSTMultisigImportResponse struct {
@@ -175,6 +295,13 @@ type APIV1POSTMultisigImportResponse struct {
Address string `json:"address"`
}
+// Response to `POST /v1/multisig/import`
+// swagger:response ImportMultisigResponse
+type importMultisigResponse struct {
+ // in:body
+ Body *APIV1POSTMultisigImportResponse
+}
+
// APIV1POSTMultisigExportResponse is the response to `POST /v1/multisig/export`
// friendly:ExportMultisigResponse
type APIV1POSTMultisigExportResponse struct {
@@ -184,12 +311,26 @@ type APIV1POSTMultisigExportResponse struct {
PKs []APIV1PublicKey `json:"pks"`
}
+// Response to `POST /v1/multisig/export`
+// swagger:response ExportMultisigResponse
+type exportMultisigResponse struct {
+ // in:body
+ Body *APIV1POSTMultisigExportResponse
+}
+
// APIV1DELETEMultisigResponse is the response to POST /v1/multisig/delete`
// friendly:DeleteMultisigResponse
type APIV1DELETEMultisigResponse struct {
APIV1ResponseEnvelope
}
+// Response to `POST /v1/multisig/delete`
+// swagger:response DeleteMultisigResponse
+type deleteMultisigResponse struct {
+ // in:body
+ Body *APIV1DELETEMultisigResponse
+}
+
// APIV1POSTMultisigTransactionSignResponse is the response to `POST /v1/multisig/sign`
// friendly:SignMultisigResponse
type APIV1POSTMultisigTransactionSignResponse struct {
@@ -199,6 +340,13 @@ type APIV1POSTMultisigTransactionSignResponse struct {
Multisig []byte `json:"multisig"`
}
+// Response to `POST /v1/multisig/sign`
+// swagger:response SignMultisigResponse
+type signMultisigResponse struct {
+ // in:body
+ Body *APIV1POSTMultisigTransactionSignResponse
+}
+
// APIV1POSTMultisigProgramSignResponse is the response to `POST /v1/multisig/signdata`
// friendly:SignProgramMultisigResponse
type APIV1POSTMultisigProgramSignResponse struct {
@@ -207,3 +355,10 @@ type APIV1POSTMultisigProgramSignResponse struct {
// swagger:strfmt byte
Multisig []byte `json:"multisig"`
}
+
+// Response to `POST /v1/multisig/signdata`
+// swagger:response SignProgramMultisigResponse
+type signProgramMultisigResponse struct {
+ // in:body
+ Body *APIV1POSTMultisigProgramSignResponse
+}
diff --git a/data/account/account.go b/data/account/account.go
index 8be24cfb6..f9cd26944 100644
--- a/data/account/account.go
+++ b/data/account/account.go
@@ -134,7 +134,7 @@ func (root Root) Address() basics.Address {
// RestoreParticipation restores a Participation from a database
// handle.
-func RestoreParticipation(store db.Accessor) (acc Participation, err error) {
+func RestoreParticipation(store db.Accessor) (acc PersistedParticipation, err error) {
var rawParent, rawVRF, rawVoting []byte
err = Migrate(store)
@@ -163,30 +163,21 @@ func RestoreParticipation(store db.Accessor) (acc Participation, err error) {
return nil
})
if err != nil {
- return Participation{}, err
+ return PersistedParticipation{}, err
}
acc.VRF = &crypto.VRFSecrets{}
err = protocol.Decode(rawVRF, acc.VRF)
if err != nil {
- return Participation{}, err
+ return PersistedParticipation{}, err
}
acc.Voting = &crypto.OneTimeSignatureSecrets{}
err = protocol.Decode(rawVoting, acc.Voting)
if err != nil {
- return Participation{}, err
+ return PersistedParticipation{}, err
}
acc.Store = store
return acc, nil
}
-
-// A ParticipationInterval defines an interval for which a participation account is valid.
-type ParticipationInterval struct {
- basics.Address
-
- // FirstValid and LastValid are inclusive.
- FirstValid basics.Round
- LastValid basics.Round
-}
diff --git a/data/account/participation.go b/data/account/participation.go
index 8d130e915..269163c99 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -20,12 +20,12 @@ import (
"context"
"database/sql"
"fmt"
- "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
@@ -41,8 +41,6 @@ import (
// For correctness, all Roots should have no more than one Participation
// globally active at any time. If this condition is violated, the Root may
// equivocate. (Algorand tolerates a limited fraction of misbehaving accounts.)
-//
-// Participations handle persistence and deletion of secrets.
type Participation struct {
Parent basics.Address
@@ -56,6 +54,13 @@ type Participation struct {
LastValid basics.Round
KeyDilution uint64
+}
+
+// PersistedParticipation encapsulates the static state of the participation
+// for a single address at any given moment, while providing the ability
+// to handle persistence and deletion of secrets.
+type PersistedParticipation struct {
+ Participation
Store db.Accessor
}
@@ -81,40 +86,6 @@ func (part Participation) OverlapsInterval(first, last basics.Round) bool {
return true
}
-// DeleteOldKeys securely deletes ephemeral keys for rounds strictly older than the given round.
-func (part Participation) DeleteOldKeys(current basics.Round, proto config.ConsensusParams) <-chan error {
- keyDilution := part.KeyDilution
- if keyDilution == 0 {
- keyDilution = proto.DefaultKeyDilution
- }
-
- part.Voting.DeleteBeforeFineGrained(basics.OneTimeIDForRound(current, keyDilution), keyDilution)
-
- errorCh := make(chan error, 1)
- deleteOldKeys := func(encodedVotingSecrets []byte) {
- errorCh <- part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- _, err := tx.Exec("UPDATE ParticipationAccount SET voting=?", encodedVotingSecrets)
- if err != nil {
- return fmt.Errorf("Participation.DeleteOldKeys: failed to update account: %v", err)
- }
- return nil
- })
- close(errorCh)
- }
- voting := part.Voting.Snapshot()
- encodedVotingSecrets := protocol.Encode(&voting)
- go deleteOldKeys(encodedVotingSecrets)
- return errorCh
-}
-
-// PersistNewParent writes a new parent address to the partkey database.
-func (part Participation) PersistNewParent() error {
- return part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- _, err := tx.Exec("UPDATE ParticipationAccount SET parent=?", part.Parent[:])
- return err
- })
-}
-
// VRFSecrets returns the VRF secrets associated with this Participation account.
func (part Participation) VRFSecrets() *crypto.VRFSecrets {
return part.VRF
@@ -135,7 +106,7 @@ func (part Participation) VotingSigner() crypto.OneTimeSigner {
}
// GenerateRegistrationTransaction returns a transaction object for registering a Participation with its parent.
-func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte, params config.ConsensusParams) transactions.Transaction {
+func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) transactions.Transaction {
t := transactions.Transaction{
Type: protocol.KeyRegistrationTx,
Header: transactions.Header{
@@ -156,8 +127,42 @@ func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos,
return t
}
+// DeleteOldKeys securely deletes ephemeral keys for rounds strictly older than the given round.
+func (part PersistedParticipation) DeleteOldKeys(current basics.Round, proto config.ConsensusParams) <-chan error {
+ keyDilution := part.KeyDilution
+ if keyDilution == 0 {
+ keyDilution = proto.DefaultKeyDilution
+ }
+
+ part.Voting.DeleteBeforeFineGrained(basics.OneTimeIDForRound(current, keyDilution), keyDilution)
+
+ errorCh := make(chan error, 1)
+ deleteOldKeys := func(encodedVotingSecrets []byte) {
+ errorCh <- part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("UPDATE ParticipationAccount SET voting=?", encodedVotingSecrets)
+ if err != nil {
+ return fmt.Errorf("Participation.DeleteOldKeys: failed to update account: %v", err)
+ }
+ return nil
+ })
+ close(errorCh)
+ }
+ voting := part.Voting.Snapshot()
+ encodedVotingSecrets := protocol.Encode(&voting)
+ go deleteOldKeys(encodedVotingSecrets)
+ return errorCh
+}
+
+// PersistNewParent writes a new parent address to the partkey database.
+func (part PersistedParticipation) PersistNewParent() error {
+ return part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("UPDATE ParticipationAccount SET parent=?", part.Parent[:])
+ return err
+ })
+}
+
// FillDBWithParticipationKeys initializes the passed database with participation keys
-func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part Participation, err error) {
+func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part PersistedParticipation, err error) {
if lastValid < firstValid {
err = fmt.Errorf("FillDBWithParticipationKeys: lastValid %d is after firstValid %d", lastValid, firstValid)
return
@@ -175,40 +180,46 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs
vrf := crypto.GenerateVRFSecrets()
// Construct the Participation containing these keys to be persisted
- part = Participation{
- Parent: address,
- VRF: vrf,
- Voting: v,
- FirstValid: firstValid,
- LastValid: lastValid,
- KeyDilution: keyDilution,
- Store: store,
+ part = PersistedParticipation{
+ Participation: Participation{
+ Parent: address,
+ VRF: vrf,
+ Voting: v,
+ FirstValid: firstValid,
+ LastValid: lastValid,
+ KeyDilution: keyDilution,
+ },
+ Store: store,
}
-
// Persist the Participation into the database
err = part.Persist()
return part, err
}
// Persist writes a Participation out to a database on the disk
-func (part Participation) Persist() error {
+func (part PersistedParticipation) Persist() error {
rawVRF := protocol.Encode(part.VRF)
voting := part.Voting.Snapshot()
rawVoting := protocol.Encode(&voting)
- return part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err := part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
err := partInstallDatabase(tx)
if err != nil {
- return fmt.Errorf("Participation.persist: failed to install database: %v", err)
+ return fmt.Errorf("failed to install database: %w", err)
}
_, err = tx.Exec("INSERT INTO ParticipationAccount (parent, vrf, voting, firstValid, lastValid, keyDilution) VALUES (?, ?, ?, ?, ?, ?)",
part.Parent[:], rawVRF, rawVoting, part.FirstValid, part.LastValid, part.KeyDilution)
if err != nil {
- return fmt.Errorf("Participation.persist: failed to insert account: %v", err)
+ return fmt.Errorf("failed to insert account: %w", err)
}
return nil
})
+
+ if err != nil {
+ err = fmt.Errorf("PersistedParticipation.Persist: %w", err)
+ }
+ return err
}
// Migrate is called when loading participation keys.
@@ -220,6 +231,6 @@ func Migrate(partDB db.Accessor) error {
}
// Close closes the underlying database handle.
-func (part Participation) Close() {
+func (part PersistedParticipation) Close() {
part.Store.Close()
}
diff --git a/data/account/participation_test.go b/data/account/participation_test.go
index 134d3bc08..d80155d84 100644
--- a/data/account/participation_test.go
+++ b/data/account/participation_test.go
@@ -19,11 +19,13 @@ package account
import (
"context"
"database/sql"
+ "os"
"testing"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
@@ -50,6 +52,9 @@ func TestParticipation_NewDB(t *testing.T) {
versions, err := getSchemaVersions(partDB)
a.NoError(err)
a.Equal(versions[PartTableSchemaName], PartTableSchemaVersion)
+
+ partDB.Close()
+ rootDB.Close()
}
func getSchemaVersions(db db.Accessor) (versions map[string]int, err error) {
@@ -113,3 +118,29 @@ func TestOverlapsInterval(t *testing.T) {
a.True(interval.OverlapsInterval(end, end))
a.True(interval.OverlapsInterval(end, after))
}
+
+func BenchmarkOldKeysDeletion(b *testing.B) {
+ a := require.New(b)
+
+ var rootAddr basics.Address
+ crypto.RandBytes(rootAddr[:])
+
+ partDB, err := db.MakeErasableAccessor(b.Name() + "_part")
+ a.NoError(err)
+ a.NotNil(partDB)
+ defer func() {
+ os.Remove(b.Name() + "_part")
+ }()
+
+ part, err := FillDBWithParticipationKeys(partDB, rootAddr, 0, 3000000, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ a.NoError(err)
+ a.NotNil(part)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ errCh := part.DeleteOldKeys(basics.Round(i), config.Consensus[protocol.ConsensusCurrentVersion])
+ err := <-errCh
+ a.NoError(err)
+ }
+ part.Close()
+}
diff --git a/data/accountManager.go b/data/accountManager.go
index ba39094cb..79a57287b 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -22,6 +22,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -30,11 +31,23 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+// A ParticipationKeyIdentity defines the parameters that make a participation key unique.
+type ParticipationKeyIdentity struct {
+ basics.Address // the address this participation key is used to vote for.
+
+ // FirstValid and LastValid are inclusive.
+ FirstValid basics.Round
+ LastValid basics.Round
+
+ VoteID crypto.OneTimeSignatureVerifier
+ SelectionID crypto.VrfPubkey
+}
+
// AccountManager loads and manages accounts for the node
type AccountManager struct {
mu deadlock.Mutex
- partIntervals map[account.ParticipationInterval]account.Participation
+ partKeys map[ParticipationKeyIdentity]account.PersistedParticipation
// Map to keep track of accounts for which we've sent
// AccountRegistered telemetry events
@@ -47,19 +60,21 @@ type AccountManager struct {
func MakeAccountManager(log logging.Logger) *AccountManager {
manager := &AccountManager{}
manager.log = log
- manager.partIntervals = make(map[account.ParticipationInterval]account.Participation)
+ manager.partKeys = make(map[ParticipationKeyIdentity]account.PersistedParticipation)
manager.registeredAccounts = make(map[string]bool)
return manager
}
// Keys returns a list of Participation accounts.
-func (manager *AccountManager) Keys() (out []account.Participation) {
+func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participation) {
manager.mu.Lock()
defer manager.mu.Unlock()
- for _, part := range manager.partIntervals {
- out = append(out, part)
+ for _, part := range manager.partKeys {
+ if part.OverlapsInterval(rnd, rnd) {
+ out = append(out, part.Participation)
+ }
}
return out
}
@@ -70,7 +85,7 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool {
manager.mu.Lock()
defer manager.mu.Unlock()
- for _, part := range manager.partIntervals {
+ for _, part := range manager.partKeys {
if part.OverlapsInterval(from, to) {
return true
}
@@ -81,26 +96,28 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool {
// AddParticipation adds a new account.Participation to be managed.
// The return value indicates if the key has been added (true) or
// if this is a duplicate key (false).
-func (manager *AccountManager) AddParticipation(participation account.Participation) bool {
+func (manager *AccountManager) AddParticipation(participation account.PersistedParticipation) bool {
manager.mu.Lock()
defer manager.mu.Unlock()
address := participation.Address()
first, last := participation.ValidInterval()
- interval := account.ParticipationInterval{
- Address: address,
- FirstValid: first,
- LastValid: last,
+ partkeyID := ParticipationKeyIdentity{
+ Address: address,
+ FirstValid: first,
+ LastValid: last,
+ VoteID: participation.Voting.OneTimeSignatureVerifier,
+ SelectionID: participation.VRF.PK,
}
// Check if we already have participation keys for this address in this interval
- _, alreadyPresent := manager.partIntervals[interval]
+ _, alreadyPresent := manager.partKeys[partkeyID]
if alreadyPresent {
return false
}
- manager.partIntervals[interval] = participation
+ manager.partKeys[partkeyID] = participation
addressString := address.String()
manager.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.PartKeyRegisteredEvent, telemetryspec.PartKeyRegisteredEventDetails{
@@ -127,10 +144,10 @@ func (manager *AccountManager) DeleteOldKeys(latestHdr bookkeeping.BlockHeader,
latestProto := config.Consensus[latestHdr.CurrentProtocol]
manager.mu.Lock()
- pendingItems := make(map[string]<-chan error, len(manager.partIntervals))
+ pendingItems := make(map[string]<-chan error, len(manager.partKeys))
func() {
defer manager.mu.Unlock()
- for _, part := range manager.partIntervals {
+ for _, part := range manager.partKeys {
// We need a key for round r+1 for agreement.
nextRound := latestHdr.Round + 1
diff --git a/data/common_test.go b/data/common_test.go
index 9df9ac914..5390c1b5a 100644
--- a/data/common_test.go
+++ b/data/common_test.go
@@ -49,7 +49,7 @@ func keypair() *crypto.SignatureSecrets {
return s
}
-func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*Ledger, []account.Root, []account.Participation, []transactions.SignedTxn, func()) {
+func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*Ledger, []account.Root, []account.PersistedParticipation, []transactions.SignedTxn, func()) {
P := numAccounts // n accounts
TXs := numTxs // n txns
maxMoneyAtStart := 1000000 // max money start
@@ -71,7 +71,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*L
genesis := make(map[basics.Address]basics.AccountData)
gen := rand.New(rand.NewSource(2))
roots := make([]account.Root, P)
- parts := make([]account.Participation, P)
+ parts := make([]account.PersistedParticipation, P)
for i := 0; i < P; i++ {
access, err := db.MakeAccessor(t.Name()+"_root_testingenv"+strconv.Itoa(i), false, true)
if err != nil {
diff --git a/data/ledger.go b/data/ledger.go
index 9da66ac12..02de26a2e 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -322,7 +322,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er
return blockhdr.UpgradeState.CurrentProtocol, nil
}
// try to see if we can figure out what the version would be.
- latestRound := l.Latest()
+ latestCommittedRound, latestRound := l.LatestCommitted()
// if the request round was for an older round, then just say the we don't know.
if r < latestRound {
return "", err
@@ -344,7 +344,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er
return latestBlockhdr.CurrentProtocol, nil
}
// otherwise, we can't really tell.
- return "", ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestRound}
+ return "", ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestCommittedRound}
}
// in this case, we do have a protocol upgrade taking place.
if r < latestBlockhdr.NextProtocolSwitchOn {
@@ -356,7 +356,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er
if r == latestBlockhdr.NextProtocolSwitchOn && latestBlockhdr.Round >= latestBlockhdr.NextProtocolVoteBefore {
return latestBlockhdr.NextProtocol, nil
}
- err = ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestRound}
+ err = ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestCommittedRound}
}
// otherwise, we can't really tell what the protocol version would be at round r.
return "", err
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index 58cf837c7..c44d127d0 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -556,10 +556,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactio
}
}
- txgroupad := make([]transactions.SignedTxnWithAD, len(txgroup))
- for i, tx := range txgroup {
- txgroupad[i].SignedTxn = tx
- }
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
transactionGroupStartsTime := time.Time{}
if recomputing {
@@ -698,7 +695,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
}
switch err.(type) {
- case ledgercore.TransactionInLedgerError:
+ case *ledgercore.TransactionInLedgerError:
asmStats.CommittedCount++
stats.RemovedInvalidCount++
case transactions.TxnDeadError:
diff --git a/data/transactions/logic/.gitignore b/data/transactions/logic/.gitignore
new file mode 100644
index 000000000..24f8b4a36
--- /dev/null
+++ b/data/transactions/logic/.gitignore
@@ -0,0 +1,2 @@
+langspec.json
+teal.tmLanguage.json
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 263e58ffb..96331db34 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -27,7 +27,7 @@ A program can either authorize some delegated action on a normal private key sig
* If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program.
* If the SHA512_256 hash of the program (prefixed by "Program") is equal to the transaction Sender address then this is a contract account wholly controlled by the program. No other signature is necessary or possible. The only way to execute a transaction against the contract account is for the program to approve it.
-The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost estimate and the program cost estimate must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have an estimated cost of 1, but a few slow crypto ops are much higher.
+The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost and the program cost must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have a cost of 1, but a few slow crypto ops are much higher. Prior to v4, program cost was estimated as the static sum of all opcode costs in a program (ignoring conditionals that might skip some code). Beginning with v4, a program's cost is tracked dynamically, while being evaluated. If the program exceeds its budget, it fails.
## Execution modes
@@ -80,11 +80,6 @@ An application transaction must indicate the action to be taken following the ex
## Operations
Most operations work with only one type of argument, uint64 or bytes, and panic if the wrong type value is on the stack.
-The instruction set was designed to execute calculator-like expressions.
-What might be a one line expression with various parenthesized clauses should be efficiently representable in TEAL.
-
-Looping is not possible, by design, to ensure predictably fast execution.
-There is a branch instruction (`bnz`, branch if not zero) which allows forward branching only so that some code may be skipped.
Many programs need only a few dozen instructions. The instruction set has some optimization built in. `intc`, `bytec`, and `arg` take an immediate value byte, making a 2-byte op to load a value onto the stack, but they also have single byte versions for loading the most common constant values. Any program will benefit from having a few common values loaded with a smaller one byte opcode. Cryptographic hashes and `ed25519verify` are single byte opcodes with powerful libraries behind them. These operations still take more time than other ops (and this is reflected in the cost of each op and the cost limit of a program) but are efficient in compiled code space.
@@ -129,6 +124,7 @@ For two-argument ops, `A` is the previous element on the stack and `B` is the la
| `~` | bitwise invert value X |
| `mulw` | A times B out to 128-bit long result as low (top) and high uint64 values on the stack |
| `addw` | A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack |
+| `divw` | Pop four uint64 values. The deepest two are interpreted as a uint128 dividend (deepest value is high word), the top two are interpreted as a uint128 divisor. Four uint64 values are pushed to the stack. The deepest two are the quotient (deeper value is the high uint64). The top two are the remainder, low bits on top. |
| `getbit` | pop a target A (integer or byte-array), and index B. Push the Bth bit of A. |
| `setbit` | pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result |
| `getbyte` | pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer |
@@ -297,6 +293,8 @@ Asset fields include `AssetHolding` and `AssetParam` fields that are used in `as
| `swap` | swaps two last values on stack: A, B -> B, A |
| `select` | selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A) |
| `assert` | immediately fail unless value X is a non-zero number |
+| `callsub target` | branch unconditionally to TARGET, saving the next instruction on the call stack |
+| `retsub` | pop the top instruction from the call stack and branch to it |
### State Access
@@ -384,12 +382,13 @@ A '[proto-buf style variable length unsigned int](https://developers.google.com/
# What TEAL Cannot Do
-Current design and implementation limitations to be aware of.
+Design and implementation limitations to be aware of with various versions of TEAL.
* TEAL cannot create or change a transaction, only approve or reject.
* Stateless TEAL cannot lookup balances of Algos or other assets. (Standard transaction accounting will apply after TEAL has run and authorized a transaction. A TEAL-approved transaction could still be invalid by other accounting rules just as a standard signed transaction could be invalid. e.g. I can't give away money I don't have.)
* TEAL cannot access information in previous blocks. TEAL cannot access most information in other transactions in the current block. (TEAL can access fields of the transaction it is attached to and the transactions in an atomic transaction group.)
* TEAL cannot know exactly what round the current transaction will commit in (but it is somewhere in FirstValid through LastValid).
* TEAL cannot know exactly what time its transaction is committed.
-* TEAL cannot loop. Its branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code.
-* TEAL cannot recurse. There is no subroutine jump operation.
+* TEAL cannot loop prior to v4. In v3 and prior, the branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code.
+* Until v4, TEAL had no notion of subroutines (and therefore no recursion). As of v4, use `callsub` and `retsub`.
+* TEAL cannot make indirect jumps. `b`, `bz`, `bnz`, and `callsub` jump to an immediately specified address, and `retsub` jumps to the address currently on the top of the call stack, which is manipulated only by previous calls to `callsub`.
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index ef43d5230..e8961a8d9 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -27,7 +27,7 @@ A program can either authorize some delegated action on a normal private key sig
* If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program.
* If the SHA512_256 hash of the program (prefixed by "Program") is equal to the transaction Sender address then this is a contract account wholly controlled by the program. No other signature is necessary or possible. The only way to execute a transaction against the contract account is for the program to approve it.
-The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost estimate and the program cost estimate must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have an estimated cost of 1, but a few slow crypto ops are much higher.
+The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost and the program cost must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have a cost of 1, but a few slow crypto ops are much higher. Prior to v4, program cost was estimated as the static sum of all opcode costs in a program (ignoring conditionals that might skip some code). Beginning with v4, a program's cost is tracked dynamically, while being evaluated. If the program exceeds its budget, it fails.
## Execution modes
@@ -57,11 +57,6 @@ Constants are pushed onto the stack by `intc`, `intc_[0123]`, `bytec`, and `byte
## Operations
Most operations work with only one type of argument, uint64 or bytes, and panic if the wrong type value is on the stack.
-The instruction set was designed to execute calculator-like expressions.
-What might be a one line expression with various parenthesized clauses should be efficiently representable in TEAL.
-
-Looping is not possible, by design, to ensure predictably fast execution.
-There is a branch instruction (`bnz`, branch if not zero) which allows forward branching only so that some code may be skipped.
Many programs need only a few dozen instructions. The instruction set has some optimization built in. `intc`, `bytec`, and `arg` take an immediate value byte, making a 2-byte op to load a value onto the stack, but they also have single byte versions for loading the most common constant values. Any program will benefit from having a few common values loaded with a smaller one byte opcode. Cryptographic hashes and `ed25519verify` are single byte opcodes with powerful libraries behind them. These operations still take more time than other ops (and this is reflected in the cost of each op and the cost limit of a program) but are efficient in compiled code space.
@@ -183,12 +178,13 @@ A '[proto-buf style variable length unsigned int](https://developers.google.com/
# What TEAL Cannot Do
-Current design and implementation limitations to be aware of.
+Design and implementation limitations to be aware of with various versions of TEAL.
* TEAL cannot create or change a transaction, only approve or reject.
* Stateless TEAL cannot lookup balances of Algos or other assets. (Standard transaction accounting will apply after TEAL has run and authorized a transaction. A TEAL-approved transaction could still be invalid by other accounting rules just as a standard signed transaction could be invalid. e.g. I can't give away money I don't have.)
* TEAL cannot access information in previous blocks. TEAL cannot access most information in other transactions in the current block. (TEAL can access fields of the transaction it is attached to and the transactions in an atomic transaction group.)
* TEAL cannot know exactly what round the current transaction will commit in (but it is somewhere in FirstValid through LastValid).
* TEAL cannot know exactly what time its transaction is committed.
-* TEAL cannot loop. Its branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code.
-* TEAL cannot recurse. There is no subroutine jump operation.
+* TEAL cannot loop prior to v4. In v3 and prior, the branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code.
+* Until v4, TEAL had no notion of subroutines (and therefore no recursion). As of v4, use `callsub` and `retsub`.
+* TEAL cannot make indirect jumps. `b`, `bz`, `bnz`, and `callsub` jump to an immediately specified address, and `retsub` jumps to the address currently on the top of the call stack, which is manipulated only by previous calls to `callsub`.
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 7a3ab2e53..4768c53e6 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -18,8 +18,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
- SHA256 hash of value X, yields [32]byte
- **Cost**:
- 7 (LogicSigVersion = 1)
- - 35 (LogicSigVersion = 2)
- - 35 (LogicSigVersion = 3)
+ - 35 (2 <= LogicSigVersion <= 4)
## keccak256
@@ -29,8 +28,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
- Keccak256 hash of value X, yields [32]byte
- **Cost**:
- 26 (LogicSigVersion = 1)
- - 130 (LogicSigVersion = 2)
- - 130 (LogicSigVersion = 3)
+ - 130 (2 <= LogicSigVersion <= 4)
## sha512_256
@@ -40,8 +38,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
- SHA512_256 hash of value X, yields [32]byte
- **Cost**:
- 9 (LogicSigVersion = 1)
- - 45 (LogicSigVersion = 2)
- - 45 (LogicSigVersion = 3)
+ - 45 (2 <= LogicSigVersion <= 4)
## ed25519verify
@@ -77,6 +74,8 @@ Overflow is an error condition which halts execution and fails the transaction.
- Pushes: uint64
- A divided by B. Panic if B == 0.
+`divw` is available to divide the two-element values produced by `mulw` and `addw`.
+
## *
- Opcode: 0x0b
@@ -222,6 +221,14 @@ Overflow is an error condition which halts execution and fails the transaction.
- A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack
- LogicSigVersion >= 2
+## divw
+
+- Opcode: 0x1f
+- Pops: *... stack*, {uint64 A}, {uint64 B}, {uint64 C}, {uint64 D}
+- Pushes: *... stack*, uint64, uint64, uint64, uint64
+- Pop four uint64 values. The deepest two are interpreted as a uint128 dividend (deepest value is high word), the top two are interpreted as a uint128 divisor. Four uint64 values are pushed to the stack. The deepest two are the quotient (deeper value is the high uint64). The top two are the remainder, low bits on top.
+- LogicSigVersion >= 4
+
## intcblock uint ...
- Opcode: 0x20 {varuint length} [{varuint value}, ...]
@@ -516,18 +523,18 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
## bnz target
-- Opcode: 0x40 {0..0x7fff forward branch offset, big endian}
+- Opcode: 0x40 {int16 branch offset, big endian. (negative offsets are illegal before v4)}
- Pops: *... stack*, uint64
- Pushes: _None_
- branch to TARGET if value X is not zero
-The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.
+The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are limited to forward branches only, 0-0x7fff until v4. v4 treats offset as a signed 16 bit integer allowing for backward branches and looping.
At LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)
## bz target
-- Opcode: 0x41 {0..0x7fff forward branch offset, big endian}
+- Opcode: 0x41 {int16 branch offset, big endian. (negative offsets are illegal before v4)}
- Pops: *... stack*, uint64
- Pushes: _None_
- branch to TARGET if value X is zero
@@ -537,7 +544,7 @@ See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.
## b target
-- Opcode: 0x42 {0..0x7fff forward branch offset, big endian}
+- Opcode: 0x42 {int16 branch offset, big endian. (negative offsets are illegal before v4)}
- Pops: _None_
- Pushes: _None_
- branch unconditionally to TARGET
@@ -843,6 +850,8 @@ params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 other
- push the following program bytes to the stack
- LogicSigVersion >= 3
+pushbytes args are not added to the bytecblock during the assembly process
+
## pushint uint
- Opcode: 0x81 {varuint int}
@@ -850,3 +859,25 @@ params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 other
- Pushes: uint64
- push immediate UINT to the stack as an integer
- LogicSigVersion >= 3
+
+pushint args are not added to the intcblock during the assembly process
+
+## callsub target
+
+- Opcode: 0x88
+- Pops: _None_
+- Pushes: _None_
+- branch unconditionally to TARGET, saving the next instruction on the call stack
+- LogicSigVersion >= 4
+
+The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
+
+## retsub
+
+- Opcode: 0x89
+- Pops: _None_
+- Pushes: _None_
+- pop the top instruction from the call stack and branch to it
+- LogicSigVersion >= 4
+
+The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 99e2d28e3..5c4d864d0 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -80,6 +80,8 @@ type OpStream struct {
// map opcode offsets to source line
OffsetToLine map[int]int
+
+ HasStatefulOps bool
}
// GetVersion returns the LogicSigVersion we're building to
@@ -97,7 +99,7 @@ func (ops *OpStream) createLabel(label string) {
ops.labels = make(map[string]int)
}
if _, ok := ops.labels[label]; ok {
- ops.errorf("duplicate label %s", label)
+ ops.errorf("duplicate label %#v", label)
}
ops.labels[label] = ops.pending.Len()
}
@@ -115,8 +117,14 @@ func (ops *OpStream) ReferToLabel(pc int, label string) {
ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
}
-func (ops *OpStream) tpush(argType StackType) {
- ops.typeStack = append(ops.typeStack, argType)
+// returns allows opcodes like `txn` to be specific about their return
+// value types, based on the field requested, rather than use Any as
+// specified by opSpec.
+func (ops *OpStream) returns(argTypes ...StackType) {
+ for range argTypes {
+ ops.tpop()
+ }
+ ops.tpusha(argTypes)
}
func (ops *OpStream) tpusha(argType []StackType) {
@@ -221,146 +229,7 @@ func (ops *OpStream) ByteLiteral(val []byte) {
ops.Bytec(constIndex)
}
-// Arg writes opcodes for loading from Lsig.Args
-func (ops *OpStream) Arg(val uint64) error {
- switch val {
- case 0:
- ops.pending.WriteByte(0x2d) // arg_0
- case 1:
- ops.pending.WriteByte(0x2e) // arg_1
- case 2:
- ops.pending.WriteByte(0x2f) // arg_2
- case 3:
- ops.pending.WriteByte(0x30) // arg_3
- default:
- if val > 0xff {
- return ops.error("cannot have more than 256 args")
- }
- ops.pending.WriteByte(0x2c)
- ops.pending.WriteByte(uint8(val))
- }
- return nil
-}
-
-// Txn writes opcodes for loading a field from the current transaction
-func (ops *OpStream) Txn(val uint64) {
- if val >= uint64(len(TxnFieldNames)) {
- ops.errorf("invalid txn field: %d", val)
- }
- ops.pending.WriteByte(0x31)
- ops.pending.WriteByte(uint8(val))
- ops.tpush(TxnFieldTypes[val])
-}
-
-// Txna writes opcodes for loading array field from the current transaction
-func (ops *OpStream) Txna(fieldNum uint64, arrayFieldIdx uint64) {
- if fieldNum >= uint64(len(TxnFieldNames)) {
- ops.errorf("invalid txn field: %d", fieldNum)
- fieldNum = 0 // avoid further error in tpush as we forge ahead
- }
- if arrayFieldIdx > 255 {
- ops.errorf("txna array index beyond 255: %d", arrayFieldIdx)
- }
- ops.pending.WriteByte(0x36)
- ops.pending.WriteByte(uint8(fieldNum))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.tpush(TxnFieldTypes[fieldNum])
-}
-
-// Gtxn writes opcodes for loading a field from the current transaction
-func (ops *OpStream) Gtxn(gid, val uint64) {
- if val >= uint64(len(TxnFieldNames)) {
- ops.errorf("invalid gtxn field: %d", val)
- val = 0 // avoid further error in tpush as we forge ahead
- }
- if gid > 255 {
- ops.errorf("gtxn transaction index beyond 255: %d", gid)
- }
- ops.pending.WriteByte(0x33)
- ops.pending.WriteByte(uint8(gid))
- ops.pending.WriteByte(uint8(val))
- ops.tpush(TxnFieldTypes[val])
-}
-
-// Gtxna writes opcodes for loading an array field from the current transaction
-func (ops *OpStream) Gtxna(gid, fieldNum uint64, arrayFieldIdx uint64) {
- if fieldNum >= uint64(len(TxnFieldNames)) {
- ops.errorf("invalid txn field: %d", fieldNum)
- fieldNum = 0 // avoid further error in tpush as we forge ahead
- }
- if gid > 255 {
- ops.errorf("gtxna group index beyond 255: %d", gid)
- }
- if arrayFieldIdx > 255 {
- ops.errorf("gtxna array index beyond 255: %d", arrayFieldIdx)
- }
- ops.pending.WriteByte(0x37)
- ops.pending.WriteByte(uint8(gid))
- ops.pending.WriteByte(uint8(fieldNum))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.tpush(TxnFieldTypes[fieldNum])
-}
-
-// Gtxns writes opcodes for loading a field from the current transaction
-func (ops *OpStream) Gtxns(fieldNum uint64) {
- if fieldNum >= uint64(len(TxnFieldNames)) {
- ops.errorf("invalid gtxns field: %d", fieldNum)
- fieldNum = 0 // avoid further error in tpush as we forge ahead
- }
- ops.pending.WriteByte(0x38)
- ops.pending.WriteByte(uint8(fieldNum))
- ops.tpush(TxnFieldTypes[fieldNum])
-}
-
-// Gtxnsa writes opcodes for loading an array field from the current transaction
-func (ops *OpStream) Gtxnsa(fieldNum uint64, arrayFieldIdx uint64) {
- if fieldNum >= uint64(len(TxnFieldNames)) {
- ops.errorf("invalid gtxnsa field: %d", fieldNum)
- fieldNum = 0 // avoid further error in tpush as we forge ahead
- }
- if arrayFieldIdx > 255 {
- ops.errorf("gtxnsa array index beyond 255: %d", arrayFieldIdx)
- }
- ops.pending.WriteByte(0x39)
- ops.pending.WriteByte(uint8(fieldNum))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.tpush(TxnFieldTypes[fieldNum])
-}
-
-// Global writes opcodes for loading an evaluator-global field
-func (ops *OpStream) Global(val GlobalField) {
- ops.pending.WriteByte(0x32)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", GlobalFieldNames[val], GlobalFieldTypes[val].String())
- ops.tpush(GlobalFieldTypes[val])
-}
-
-// AssetHolding writes opcodes for accessing data from AssetHolding
-func (ops *OpStream) AssetHolding(val uint64) {
- if val >= uint64(len(AssetHoldingFieldNames)) {
- ops.errorf("invalid asset holding field: %d", val)
- val = 0 // avoid further error in tpush as we forge ahead
- }
- ops.pending.WriteByte(OpsByName[ops.Version]["asset_holding_get"].Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.tpush(AssetHoldingFieldTypes[val])
- ops.tpush(StackUint64)
-}
-
-// AssetParams writes opcodes for accessing data from AssetParams
-func (ops *OpStream) AssetParams(val uint64) {
- if val >= uint64(len(AssetParamsFieldNames)) {
- ops.errorf("invalid asset params field: %d", val)
- val = 0 // avoid further error in tpush as we forge ahead
- }
- ops.pending.WriteByte(OpsByName[ops.Version]["asset_params_get"].Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.tpush(AssetParamsFieldTypes[val])
- ops.tpush(StackUint64)
-}
-
func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
return ops.error("int needs one argument")
}
@@ -392,7 +261,6 @@ func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
// Explicit invocation of const lookup and push
func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
return ops.error("intc operation needs one argument")
}
@@ -404,7 +272,6 @@ func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
ops.error("bytec operation needs one argument")
}
@@ -417,7 +284,6 @@ func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error {
}
func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
ops.errorf("%s needs one argument", spec.Name)
}
@@ -432,7 +298,6 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
ops.errorf("%s needs one argument", spec.Name)
}
@@ -596,7 +461,6 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte 0x....
// byte "this is a string\n"
func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) == 0 {
return ops.error("byte operation needs byte literal argument")
}
@@ -609,7 +473,7 @@ func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
}
func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
- ops.pending.WriteByte(0x20) // intcblock
+ ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
ops.pending.Write(scratch[:l])
@@ -628,7 +492,7 @@ func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
}
func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
- ops.pending.WriteByte(0x26) // bytecblock
+ ops.pending.WriteByte(spec.Opcode)
bvals := make([][]byte, 0, len(args))
rest := args
for len(rest) > 0 {
@@ -660,7 +524,6 @@ func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// addr A1EU...
// parses base32-with-checksum account address strings into a byte literal
func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
return ops.error("addr operation needs one argument")
}
@@ -673,7 +536,6 @@ func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error {
}
func assembleArg(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
return ops.error("arg operation needs one argument")
}
@@ -681,12 +543,24 @@ func assembleArg(ops *OpStream, spec *OpSpec, args []string) error {
if err != nil {
return ops.error(err)
}
- ops.Arg(val)
- return nil
+ altSpec := *spec
+ if val < 4 {
+ switch val {
+ case 0:
+ altSpec = OpsByName[ops.Version]["arg_0"]
+ case 1:
+ altSpec = OpsByName[ops.Version]["arg_1"]
+ case 2:
+ altSpec = OpsByName[ops.Version]["arg_2"]
+ case 3:
+ altSpec = OpsByName[ops.Version]["arg_3"]
+ }
+ args = []string{}
+ }
+ return asmDefault(ops, &altSpec, args)
}
func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != 1 {
return ops.error("branch operation needs label argument")
}
@@ -716,17 +590,18 @@ func assembleTxn(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return ops.errorf("txn unknown field: %v", args[0])
+ return ops.errorf("txn unknown field: %#v", args[0])
}
_, ok = txnaFieldSpecByField[fs.field]
if ok {
- return ops.errorf("found array field %v in txn op", args[0])
+ return ops.errorf("found array field %#v in txn op", args[0])
}
if fs.version > ops.Version {
- return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ return ops.errorf("field %#v available in version %d. Missed #pragma version?", args[0], fs.version)
}
- val := fs.field
- ops.Txn(uint64(val))
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(fs.field))
+ ops.returns(TxnFieldTypes[fs.field])
return nil
}
@@ -736,7 +611,8 @@ func assembleTxn2(ops *OpStream, spec *OpSpec, args []string) error {
return assembleTxn(ops, spec, args)
}
if len(args) == 2 {
- return assembleTxna(ops, spec, args)
+ txna := OpsByName[ops.Version]["txna"]
+ return assembleTxna(ops, &txna, args)
}
return ops.error("txn expects one or two arguments")
}
@@ -747,21 +623,27 @@ func assembleTxna(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return ops.errorf("txna unknown field: %v", args[0])
+ return ops.errorf("txna unknown field: %#v", args[0])
}
_, ok = txnaFieldSpecByField[fs.field]
if !ok {
- return ops.errorf("txna unknown field: %v", args[0])
+ return ops.errorf("txna unknown field: %#v", args[0])
}
if fs.version > ops.Version {
- return ops.errorf("txna %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ return ops.errorf("txna %#v available in version %d. Missed #pragma version?", args[0], fs.version)
}
arrayFieldIdx, err := strconv.ParseUint(args[1], 0, 64)
if err != nil {
return ops.error(err)
}
- fieldNum := fs.field
- ops.Txna(uint64(fieldNum), uint64(arrayFieldIdx))
+ if arrayFieldIdx > 255 {
+ return ops.errorf("txna array index beyond 255: %d", arrayFieldIdx)
+ }
+
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(fs.field))
+ ops.pending.WriteByte(uint8(arrayFieldIdx))
+ ops.returns(TxnFieldTypes[fs.field])
return nil
}
@@ -769,23 +651,30 @@ func assembleGtxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 2 {
return ops.error("gtxn expects two arguments")
}
- gtid, err := strconv.ParseUint(args[0], 0, 64)
+ slot, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
return ops.error(err)
}
+ if slot > 255 {
+ return ops.errorf("gtxn transaction index beyond 255: %d", slot)
+ }
+
fs, ok := txnFieldSpecByName[args[1]]
if !ok {
- return ops.errorf("gtxn unknown field: %v", args[1])
+ return ops.errorf("gtxn unknown field: %#v", args[1])
}
_, ok = txnaFieldSpecByField[fs.field]
if ok {
- return ops.errorf("found array field %v in gtxn op", args[1])
+ return ops.errorf("found array field %#v in gtxn op", args[1])
}
if fs.version > ops.Version {
- return ops.errorf("field %s available in version %d. Missed #pragma version?", args[1], fs.version)
+ return ops.errorf("field %#v available in version %d. Missed #pragma version?", args[1], fs.version)
}
- val := fs.field
- ops.Gtxn(gtid, uint64(val))
+
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(slot))
+ ops.pending.WriteByte(uint8(fs.field))
+ ops.returns(TxnFieldTypes[fs.field])
return nil
}
@@ -794,7 +683,8 @@ func assembleGtxn2(ops *OpStream, spec *OpSpec, args []string) error {
return assembleGtxn(ops, spec, args)
}
if len(args) == 3 {
- return assembleGtxna(ops, spec, args)
+ gtxna := OpsByName[ops.Version]["gtxna"]
+ return assembleGtxna(ops, &gtxna, args)
}
return ops.error("gtxn expects two or three arguments")
}
@@ -803,50 +693,64 @@ func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 3 {
return ops.error("gtxna expects three arguments")
}
- gtid, err := strconv.ParseUint(args[0], 0, 64)
+ slot, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
return ops.error(err)
}
+ if slot > 255 {
+ return ops.errorf("gtxna group index beyond 255: %d", slot)
+ }
+
fs, ok := txnFieldSpecByName[args[1]]
if !ok {
- return ops.errorf("gtxna unknown field: %v", args[1])
+ return ops.errorf("gtxna unknown field: %#v", args[1])
}
_, ok = txnaFieldSpecByField[fs.field]
if !ok {
- return ops.errorf("gtxna unknown field: %v", args[1])
+ return ops.errorf("gtxna unknown field: %#v", args[1])
}
if fs.version > ops.Version {
- return ops.errorf("gtxna %s available in version %d. Missed #pragma version?", args[1], fs.version)
+ return ops.errorf("gtxna %#v available in version %d. Missed #pragma version?", args[1], fs.version)
}
arrayFieldIdx, err := strconv.ParseUint(args[2], 0, 64)
if err != nil {
return ops.error(err)
}
- fieldNum := fs.field
- ops.Gtxna(gtid, uint64(fieldNum), uint64(arrayFieldIdx))
+ if arrayFieldIdx > 255 {
+ return ops.errorf("gtxna array index beyond 255: %d", arrayFieldIdx)
+ }
+
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(slot))
+ ops.pending.WriteByte(uint8(fs.field))
+ ops.pending.WriteByte(uint8(arrayFieldIdx))
+ ops.returns(TxnFieldTypes[fs.field])
return nil
}
func assembleGtxns(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 2 {
- return assembleGtxnsa(ops, spec, args)
+ gtxnsa := OpsByName[ops.Version]["gtxnsa"]
+ return assembleGtxnsa(ops, &gtxnsa, args)
}
if len(args) != 1 {
return ops.error("gtxns expects one or two immediate arguments")
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return ops.errorf("gtxns unknown field: %v", args[0])
+ return ops.errorf("gtxns unknown field: %#v", args[0])
}
_, ok = txnaFieldSpecByField[fs.field]
if ok {
- return ops.errorf("found array field %v in gtxns op", args[0])
+ return ops.errorf("found array field %#v in gtxns op", args[0])
}
if fs.version > ops.Version {
- return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ return ops.errorf("field %#v available in version %d. Missed #pragma version?", args[0], fs.version)
}
- val := fs.field
- ops.Gtxns(uint64(val))
+
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(fs.field))
+ ops.returns(TxnFieldTypes[fs.field])
return nil
}
@@ -856,66 +760,75 @@ func assembleGtxnsa(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return ops.errorf("gtxnsa unknown field: %v", args[0])
+ return ops.errorf("gtxnsa unknown field: %#v", args[0])
}
_, ok = txnaFieldSpecByField[fs.field]
if !ok {
- return ops.errorf("gtxnsa unknown field: %v", args[0])
+ return ops.errorf("gtxnsa unknown field: %#v", args[0])
}
if fs.version > ops.Version {
- return ops.errorf("gtxnsa %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ return ops.errorf("gtxnsa %#v available in version %d. Missed #pragma version?", args[0], fs.version)
}
arrayFieldIdx, err := strconv.ParseUint(args[1], 0, 64)
if err != nil {
return ops.error(err)
}
- fieldNum := fs.field
- ops.Gtxnsa(uint64(fieldNum), uint64(arrayFieldIdx))
+ if arrayFieldIdx > 255 {
+ return ops.errorf("gtxnsa array index beyond 255: %d", arrayFieldIdx)
+ }
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(fs.field))
+ ops.pending.WriteByte(uint8(arrayFieldIdx))
+ ops.returns(TxnFieldTypes[fs.field])
return nil
}
func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- ops.error("global expects one argument")
- args = []string{GlobalFieldNames[0]}
+ return ops.error("global expects one argument")
}
fs, ok := globalFieldSpecByName[args[0]]
if !ok {
- ops.errorf("global unknown field: %v", args[0])
- fs, _ = globalFieldSpecByName[GlobalFieldNames[0]]
+ return ops.errorf("global unknown field: %#v", args[0])
}
if fs.version > ops.Version {
+ // no return here. we may as well continue to maintain typestack
ops.errorf("global %s available in version %d. Missed #pragma version?", args[0], fs.version)
}
- ops.Global(fs.gfield)
+
+ val := fs.gfield
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(val))
+ ops.trace("%s (%s)", GlobalFieldNames[val], GlobalFieldTypes[val].String())
+ ops.returns(GlobalFieldTypes[val])
return nil
}
func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- ops.error("asset_holding_get expects one argument")
- args = []string{AssetHoldingFieldNames[0]}
+ return ops.error("asset_holding_get expects one argument")
}
val, ok := assetHoldingFields[args[0]]
if !ok {
- ops.errorf("asset_holding_get unknown arg: %v", args[0])
- val = 0
+ return ops.errorf("asset_holding_get unknown arg: %#v", args[0])
}
- ops.AssetHolding(val)
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(val))
+ ops.returns(AssetHoldingFieldTypes[val], StackUint64)
return nil
}
func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- ops.error("asset_params_get expects one argument")
- args = []string{AssetParamsFieldNames[0]}
+ return ops.error("asset_params_get expects one argument")
}
val, ok := assetParamsFields[args[0]]
if !ok {
- ops.errorf("asset_params_get unknown arg: %v", args[0])
- val = 0
+ return ops.errorf("asset_params_get unknown arg: %#v", args[0])
}
- ops.AssetParams(val)
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(uint8(val))
+ ops.returns(AssetParamsFieldTypes[val], StackUint64)
return nil
}
@@ -923,9 +836,8 @@ type assembleFunc func(*OpStream, *OpSpec, []string) error
// Basic assembly. Any extra bytes of opcode are encoded as byte immediates.
func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
- ops.checkArgs(*spec)
if len(args) != spec.Details.Size-1 {
- ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size)
+ ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size-1)
}
ops.pending.WriteByte(spec.Opcode)
for i := 0; i < spec.Details.Size-1; i++ {
@@ -1121,11 +1033,22 @@ func (ops *OpStream) assemble(fin io.Reader) error {
ops.trace("%d: no fields\n", ops.sourceLine)
continue
}
- // we're going to process opcodes, so fix the Version
+ // we're about to begin processing opcodes, so fix the Version
if ops.Version == assemblerNoVersion {
ops.Version = AssemblerDefaultVersion
}
opstring := fields[0]
+
+ if opstring[len(opstring)-1] == ':' {
+ ops.createLabel(opstring[:len(opstring)-1])
+ fields = fields[1:]
+ if len(fields) == 0 {
+			// There was a label, no need to ops.trace this
+ continue
+ }
+ opstring = fields[0]
+ }
+
spec, ok := OpsByName[ops.Version][opstring]
if !ok {
spec, ok = keywords[opstring]
@@ -1133,14 +1056,14 @@ func (ops *OpStream) assemble(fin io.Reader) error {
if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
ops.RecordSourceLine()
+ if spec.Modes == runModeApplication {
+ ops.HasStatefulOps = true
+ }
+ ops.checkArgs(spec)
spec.asm(ops, &spec, fields[1:])
ops.trace("\n")
continue
}
- if opstring[len(opstring)-1] == ':' {
- ops.createLabel(opstring[:len(opstring)-1])
- continue
- }
// unknown opcode, let's report a good error if version problem
spec, ok = OpsByName[AssemblerMaxVersion][opstring]
if ok {
@@ -1154,7 +1077,7 @@ func (ops *OpStream) assemble(fin io.Reader) error {
if ops.Version <= 1 {
for label, dest := range ops.labels {
if dest == ops.pending.Len() {
- ops.errorf("label %v is too far away", label)
+ ops.errorf("label %#v is too far away", label)
}
}
}
@@ -1225,20 +1148,20 @@ func (ops *OpStream) resolveLabels() {
dest, ok := ops.labels[lr.label]
if !ok {
if !reported[lr.label] {
- ops.errorf("reference to undefined label %v", lr.label)
+ ops.errorf("reference to undefined label %#v", lr.label)
}
reported[lr.label] = true
continue
}
// all branch instructions (currently) are opcode byte and 2 offset bytes, and the destination is relative to the next pc as if the branch was a no-op
naturalPc := lr.position + 3
- if dest < naturalPc {
- ops.errorf("label %v is before reference but only forward jumps are allowed", lr.label)
+ if ops.Version < backBranchEnabledVersion && dest < naturalPc {
+ ops.errorf("label %#v is a back reference, back jump support was introduced in TEAL v4", lr.label)
continue
}
jump := dest - naturalPc
if jump > 0x7fff {
- ops.errorf("label %v is too far away", lr.label)
+ ops.errorf("label %#v is too far away", lr.label)
continue
}
raw[lr.position+1] = uint8(jump >> 8)
@@ -1381,14 +1304,19 @@ func AssembleStringWithVersion(text string, version uint64) (*OpStream, error) {
}
type disassembleState struct {
- program []byte
- pc int
- out io.Writer
- labelCount int
- pendingLabels map[int]string
+ program []byte
+ pc int
+ out io.Writer
+
+ numericTargets bool
+ labelCount int
+ pendingLabels map[int]string
nextpc int
err error
+
+ intc []uint64
+ bytec [][]byte
}
func (dis *disassembleState) putLabel(label string, target int) {
@@ -1405,29 +1333,22 @@ func (dis *disassembleState) outputLabelIfNeeded() (err error) {
return
}
-type disassembleFunc func(dis *disassembleState, spec *OpSpec)
+type disassembleFunc func(dis *disassembleState, spec *OpSpec) (string, error)
// Basic disasemble, and extra bytes of opcode are decoded as bytes integers.
-func disDefault(dis *disassembleState, spec *OpSpec) {
+func disDefault(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + spec.Details.Size - 1
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + spec.Details.Size
- _, dis.err = fmt.Fprintf(dis.out, "%s", spec.Name)
- if dis.err != nil {
- return
- }
+ out := spec.Name
for s := 1; s < spec.Details.Size; s++ {
b := uint(dis.program[dis.pc+s])
- _, dis.err = fmt.Fprintf(dis.out, " %d", b)
- if dis.err != nil {
- return
- }
+ out += fmt.Sprintf(" %d", b)
}
- _, dis.err = fmt.Fprintf(dis.out, "\n")
+ return out, nil
}
var errShortIntcblock = errors.New("intcblock ran past end of program")
@@ -1462,33 +1383,29 @@ func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err erro
return
}
-func checkIntConstBlock(cx *evalContext) int {
+func checkIntConstBlock(cx *evalContext) error {
pos := cx.pc + 1
numInts, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- cx.err = fmt.Errorf("could not decode int const block size at pc=%d", pos)
- return 1
+ return fmt.Errorf("could not decode int const block size at pc=%d", pos)
}
pos += bytesUsed
if numInts > uint64(len(cx.program)) {
- cx.err = errTooManyIntc
- return 0
+ return errTooManyIntc
}
//intc = make([]uint64, numInts)
for i := uint64(0); i < numInts; i++ {
if pos >= len(cx.program) {
- cx.err = errShortIntcblock
- return 0
+ return errShortIntcblock
}
_, bytesUsed = binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- cx.err = fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos)
- return 1
+ return fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos)
}
pos += bytesUsed
}
cx.nextpc = pos
- return 1
+ return nil
}
var errShortBytecblock = errors.New("bytecblock ran past end of program")
@@ -1534,258 +1451,319 @@ func parseBytecBlock(program []byte, pc int) (bytec [][]byte, nextpc int, err er
return
}
-func checkByteConstBlock(cx *evalContext) int {
+func checkByteConstBlock(cx *evalContext) error {
pos := cx.pc + 1
numItems, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- cx.err = fmt.Errorf("could not decode []byte const block size at pc=%d", pos)
- return 1
+ return fmt.Errorf("could not decode []byte const block size at pc=%d", pos)
}
pos += bytesUsed
if numItems > uint64(len(cx.program)) {
- cx.err = errTooManyItems
- return 0
+ return errTooManyItems
}
//bytec = make([][]byte, numItems)
for i := uint64(0); i < numItems; i++ {
if pos >= len(cx.program) {
- cx.err = errShortBytecblock
- return 0
+ return errShortBytecblock
}
itemLen, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- cx.err = fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos)
- return 1
+ return fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos)
}
pos += bytesUsed
if pos >= len(cx.program) {
- cx.err = errShortBytecblock
- return 0
+ return errShortBytecblock
}
end := uint64(pos) + itemLen
if end > uint64(len(cx.program)) || end < uint64(pos) {
- cx.err = errShortBytecblock
- return 0
+ return errShortBytecblock
}
//bytec[i] = program[pos : pos+int(itemLen)]
pos += int(itemLen)
}
cx.nextpc = pos
- return 1
+ return nil
}
-func disIntcblock(dis *disassembleState, spec *OpSpec) {
- var intc []uint64
- intc, dis.nextpc, dis.err = parseIntcblock(dis.program, dis.pc)
- if dis.err != nil {
- return
- }
- _, dis.err = fmt.Fprintf(dis.out, "intcblock")
- if dis.err != nil {
- return
+func disIntcblock(dis *disassembleState, spec *OpSpec) (string, error) {
+ intc, nextpc, err := parseIntcblock(dis.program, dis.pc)
+ if err != nil {
+ return "", err
}
+ dis.nextpc = nextpc
+ out := spec.Name
for _, iv := range intc {
- _, dis.err = fmt.Fprintf(dis.out, " %d", iv)
- if dis.err != nil {
- return
- }
+ dis.intc = append(dis.intc, iv)
+ out += fmt.Sprintf(" %d", iv)
}
- _, dis.err = dis.out.Write([]byte("\n"))
+ return out, nil
}
-func disBytecblock(dis *disassembleState, spec *OpSpec) {
- var bytec [][]byte
- bytec, dis.nextpc, dis.err = parseBytecBlock(dis.program, dis.pc)
- if dis.err != nil {
- return
+func disIntc(dis *disassembleState, spec *OpSpec) (string, error) {
+ lastIdx := dis.pc + spec.Details.Size - 1
+ if len(dis.program) <= lastIdx {
+ missing := lastIdx - len(dis.program) + 1
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
- _, dis.err = fmt.Fprintf(dis.out, "bytecblock")
- if dis.err != nil {
- return
+ dis.nextpc = dis.pc + spec.Details.Size
+ var suffix string
+ var b int
+ switch spec.Opcode {
+ case 0x22:
+ suffix = "_0"
+ b = 0
+ case 0x23:
+ suffix = "_1"
+ b = 1
+ case 0x24:
+ suffix = "_2"
+ b = 2
+ case 0x25:
+ suffix = "_3"
+ b = 3
+ case 0x21:
+ b = int(dis.program[dis.pc+1])
+ suffix = fmt.Sprintf(" %d", b)
+ default:
+ return "", fmt.Errorf("disIntc on %v", spec)
+ }
+ if b < len(dis.intc) {
+ return fmt.Sprintf("intc%s // %d", suffix, dis.intc[b]), nil
}
+ return fmt.Sprintf("intc%s", suffix), nil
+}
+
+func disBytecblock(dis *disassembleState, spec *OpSpec) (string, error) {
+ bytec, nextpc, err := parseBytecBlock(dis.program, dis.pc)
+ if err != nil {
+ return "", err
+ }
+ dis.nextpc = nextpc
+ out := spec.Name
for _, bv := range bytec {
- _, dis.err = fmt.Fprintf(dis.out, " 0x%s", hex.EncodeToString(bv))
- if dis.err != nil {
- return
+ dis.bytec = append(dis.bytec, bv)
+ out += fmt.Sprintf(" 0x%s", hex.EncodeToString(bv))
+ }
+ return out, nil
+}
+
+func allPrintableASCII(bytes []byte) bool {
+ for _, b := range bytes {
+ if b < 32 || b > 126 {
+ return false
}
}
- _, dis.err = dis.out.Write([]byte("\n"))
+ return true
}
+func guessByteFormat(bytes []byte) string {
+ var short basics.Address
-func disPushInt(dis *disassembleState, spec *OpSpec) {
+ if len(bytes) == len(short) {
+ copy(short[:], bytes[:])
+ return fmt.Sprintf("addr %s", short.String())
+ }
+ if allPrintableASCII(bytes) {
+ return fmt.Sprintf("%#v", string(bytes))
+ }
+ return "0x" + hex.EncodeToString(bytes)
+}
+
+func disBytec(dis *disassembleState, spec *OpSpec) (string, error) {
+ lastIdx := dis.pc + spec.Details.Size - 1
+ if len(dis.program) <= lastIdx {
+ missing := lastIdx - len(dis.program) + 1
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
+ }
+ dis.nextpc = dis.pc + spec.Details.Size
+ var suffix string
+ var b int
+ switch spec.Opcode {
+ case 0x28:
+ suffix = "_0"
+ b = 0
+ case 0x29:
+ suffix = "_1"
+ b = 1
+ case 0x2a:
+ suffix = "_2"
+ b = 2
+ case 0x2b:
+ suffix = "_3"
+ b = 3
+ case 0x27:
+ b = int(dis.program[dis.pc+1])
+ suffix = fmt.Sprintf(" %d", b)
+ }
+ if b < len(dis.bytec) {
+ return fmt.Sprintf("bytec%s // %s", suffix, guessByteFormat(dis.bytec[b])), nil
+ }
+ return fmt.Sprintf("bytec%s", suffix), nil
+}
+
+func disPushInt(dis *disassembleState, spec *OpSpec) (string, error) {
pos := dis.pc + 1
val, bytesUsed := binary.Uvarint(dis.program[pos:])
if bytesUsed <= 0 {
- dis.err = fmt.Errorf("could not decode int at pc=%d", pos)
- return
+ return "", fmt.Errorf("could not decode int at pc=%d", pos)
}
- pos += bytesUsed
- _, dis.err = fmt.Fprintf(dis.out, "%s %d\n", spec.Name, val)
- dis.nextpc = pos
+ dis.nextpc = pos + bytesUsed
+ return fmt.Sprintf("%s %d", spec.Name, val), nil
}
-func checkPushInt(cx *evalContext) int {
+func checkPushInt(cx *evalContext) error {
opPushInt(cx)
- return 1
+ return cx.err
}
-func disPushBytes(dis *disassembleState, spec *OpSpec) {
+func disPushBytes(dis *disassembleState, spec *OpSpec) (string, error) {
pos := dis.pc + 1
length, bytesUsed := binary.Uvarint(dis.program[pos:])
if bytesUsed <= 0 {
- dis.err = fmt.Errorf("could not decode bytes length at pc=%d", pos)
- return
+ return "", fmt.Errorf("could not decode bytes length at pc=%d", pos)
}
pos += bytesUsed
end := uint64(pos) + length
if end > uint64(len(dis.program)) || end < uint64(pos) {
- dis.err = fmt.Errorf("pushbytes too long %d %d", end, pos)
- return
+ return "", fmt.Errorf("pushbytes too long %d %d", end, pos)
}
bytes := dis.program[pos:end]
- _, dis.err = fmt.Fprintf(dis.out, "%s 0x%s", spec.Name, hex.EncodeToString(bytes))
dis.nextpc = int(end)
+ return fmt.Sprintf("%s 0x%s", spec.Name, hex.EncodeToString(bytes)), nil
}
-func checkPushBytes(cx *evalContext) int {
+func checkPushBytes(cx *evalContext) error {
opPushBytes(cx)
- return 1
+ return cx.err
}
// This is also used to disassemble gtxns
-func disTxn(dis *disassembleState, spec *OpSpec) {
+func disTxn(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 1
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 2
txarg := dis.program[dis.pc+1]
if int(txarg) >= len(TxnFieldNames) {
- dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- return
+ return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
}
- _, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, TxnFieldNames[txarg])
+ return fmt.Sprintf("%s %s", spec.Name, TxnFieldNames[txarg]), nil
}
// This is also used to disassemble gtxnsa
-func disTxna(dis *disassembleState, spec *OpSpec) {
+func disTxna(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 2
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 3
txarg := dis.program[dis.pc+1]
if int(txarg) >= len(TxnFieldNames) {
- dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- return
+ return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
}
arrayFieldIdx := dis.program[dis.pc+2]
- _, dis.err = fmt.Fprintf(dis.out, "%s %s %d\n", spec.Name, TxnFieldNames[txarg], arrayFieldIdx)
+ return fmt.Sprintf("%s %s %d", spec.Name, TxnFieldNames[txarg], arrayFieldIdx), nil
}
-func disGtxn(dis *disassembleState, spec *OpSpec) {
+func disGtxn(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 2
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 3
gi := dis.program[dis.pc+1]
txarg := dis.program[dis.pc+2]
if int(txarg) >= len(TxnFieldNames) {
- dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- return
+ return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
}
- _, dis.err = fmt.Fprintf(dis.out, "gtxn %d %s\n", gi, TxnFieldNames[txarg])
+ return fmt.Sprintf("gtxn %d %s", gi, TxnFieldNames[txarg]), nil
}
-func disGtxna(dis *disassembleState, spec *OpSpec) {
+func disGtxna(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 3
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 4
gi := dis.program[dis.pc+1]
txarg := dis.program[dis.pc+2]
if int(txarg) >= len(TxnFieldNames) {
- dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- return
+ return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
}
arrayFieldIdx := dis.program[dis.pc+3]
- _, dis.err = fmt.Fprintf(dis.out, "gtxna %d %s %d\n", gi, TxnFieldNames[txarg], arrayFieldIdx)
+ return fmt.Sprintf("gtxna %d %s %d", gi, TxnFieldNames[txarg], arrayFieldIdx), nil
}
-func disGlobal(dis *disassembleState, spec *OpSpec) {
+func disGlobal(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 1
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 2
garg := dis.program[dis.pc+1]
if int(garg) >= len(GlobalFieldNames) {
- dis.err = fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc)
- return
+ return "", fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc)
}
- _, dis.err = fmt.Fprintf(dis.out, "global %s\n", GlobalFieldNames[garg])
+ return fmt.Sprintf("global %s", GlobalFieldNames[garg]), nil
}
-func disBranch(dis *disassembleState, spec *OpSpec) {
+func disBranch(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 2
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 3
offset := (uint(dis.program[dis.pc+1]) << 8) | uint(dis.program[dis.pc+2])
target := int(offset) + dis.pc + 3
- label, labelExists := dis.pendingLabels[target]
- if !labelExists {
- dis.labelCount++
- label = fmt.Sprintf("label%d", dis.labelCount)
- dis.putLabel(label, target)
+ if target > 0xffff {
+ target -= 0x10000
+ }
+ var label string
+ if dis.numericTargets {
+ label = fmt.Sprintf("%d", target)
+ } else {
+ if known, ok := dis.pendingLabels[target]; ok {
+ label = known
+ } else {
+ dis.labelCount++
+ label = fmt.Sprintf("label%d", dis.labelCount)
+ dis.putLabel(label, target)
+ }
}
- _, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, label)
+ return fmt.Sprintf("%s %s", spec.Name, label), nil
}
-func disAssetHolding(dis *disassembleState, spec *OpSpec) {
+func disAssetHolding(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 1
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 2
arg := dis.program[dis.pc+1]
if int(arg) >= len(AssetHoldingFieldNames) {
- dis.err = fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc)
- return
+ return "", fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc)
}
- _, dis.err = fmt.Fprintf(dis.out, "asset_holding_get %s\n", AssetHoldingFieldNames[arg])
+ return fmt.Sprintf("asset_holding_get %s", AssetHoldingFieldNames[arg]), nil
}
-func disAssetParams(dis *disassembleState, spec *OpSpec) {
+func disAssetParams(dis *disassembleState, spec *OpSpec) (string, error) {
lastIdx := dis.pc + 1
if len(dis.program) <= lastIdx {
missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
+ return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
}
dis.nextpc = dis.pc + 2
arg := dis.program[dis.pc+1]
if int(arg) >= len(AssetParamsFieldNames) {
- dis.err = fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc)
- return
+ return "", fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc)
}
- _, dis.err = fmt.Fprintf(dis.out, "asset_params_get %s\n", AssetParamsFieldNames[arg])
+ return fmt.Sprintf("asset_params_get %s", AssetParamsFieldNames[arg]), nil
}
type disInfo struct {
@@ -1809,7 +1787,7 @@ func disassembleInstrumented(program []byte) (text string, ds disInfo, err error
text = out.String()
return
}
- fmt.Fprintf(dis.out, "// version %d\n", version)
+ fmt.Fprintf(dis.out, "#pragma version %d\n", version)
dis.pc = vlen
for dis.pc < len(program) {
err = dis.outputLabelIfNeeded()
@@ -1834,11 +1812,13 @@ func disassembleInstrumented(program []byte) (text string, ds disInfo, err error
ds.pcOffset = append(ds.pcOffset, PCOffset{dis.pc, out.Len()})
// Actually do the disassembly
- op.dis(&dis, &op)
- if dis.err != nil {
- err = dis.err
+ var line string
+ line, err = op.dis(&dis, &op)
+ if err != nil {
return
}
+ out.WriteString(line)
+ out.WriteRune('\n')
dis.pc = dis.nextpc
}
err = dis.outputLabelIfNeeded()
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 762335193..1fca9c849 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -244,6 +244,36 @@ pushint 1000
pushbytes "john"
`
+// Keep in mind, only use existing int and byte constants, or else use
+// push* instead. The idea is to not cause the *cblocks to change.
+const v4Nonsense = `
+int 1
+pushint 2000
+int 0
+int 2
+divw
+callsub stuff
+b next
+stuff:
+retsub
+next:
+int 1
+`
+
+var nonsense = map[uint64]string{
+ 1: v1Nonsense,
+ 2: v1Nonsense + v2Nonsense,
+ 3: v1Nonsense + v2Nonsense + v3Nonsense,
+ 4: v1Nonsense + v2Nonsense + v3Nonsense + v4Nonsense,
+}
+
+var compiled = map[uint64]string{
+ 1: "012008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b1716154000032903494",
+ 2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f",
+ 3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
+ 4: "042008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e210581d00f210721061f880003420001892105",
+}
+
func pseudoOp(opcode string) bool {
// We don't test every combination of
// intcblock,bytecblock,intc*,bytec*,arg* here. Not all of
@@ -263,44 +293,33 @@ func TestAssemble(t *testing.T) {
// Run test. It should pass.
//
// This doesn't have to be a sensible program to run, it just has to compile.
- for _, spec := range OpSpecs {
- // Ensure that we have some basic check of all the ops, except
- if !strings.Contains(v1Nonsense+v2Nonsense, spec.Name) &&
- !pseudoOp(spec.Name) && spec.Version <= 2 {
- t.Errorf("v2 nonsense test should contain op %v", spec.Name)
- }
- }
- // First, we test v2, not AssemblerMaxVersion. A higher version is
- // allowed to differ (and must, in the first byte).
- ops := testProg(t, v1Nonsense+v2Nonsense, 2)
- // check that compilation is stable over time and we assemble to the same bytes this month that we did last month.
- expectedBytes, _ := hex.DecodeString("022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f")
- if bytes.Compare(expectedBytes, ops.Program) != 0 {
- // this print is for convenience if the program has been changed. the hex string can be copy pasted back in as a new expected result.
- t.Log(hex.EncodeToString(ops.Program))
- }
- require.Equal(t, expectedBytes, ops.Program)
-
- // We test v3 here, and compare to AssemblerMaxVersion, with
- // the intention that the test breaks the next time
- // AssemblerMaxVersion is increased. At that point, we would
- // add a new test for v4, and leave behind this test for v3.
-
- for _, spec := range OpSpecs {
- // Ensure that we have some basic check of all the ops, except
- if !strings.Contains(v1Nonsense+v2Nonsense+v3Nonsense, spec.Name) &&
- !pseudoOp(spec.Name) && spec.Version <= 3 {
- t.Errorf("v3 nonsense test should contain op %v", spec.Name)
- }
- }
- ops = testProg(t, v1Nonsense+v2Nonsense+v3Nonsense, AssemblerMaxVersion)
- // check that compilation is stable over time and we assemble to the same bytes this month that we did last month.
- expectedBytes, _ = hex.DecodeString("032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e")
- if bytes.Compare(expectedBytes, ops.Program) != 0 {
- // this print is for convenience if the program has been changed. the hex string can be copy pasted back in as a new expected result.
- t.Log(hex.EncodeToString(ops.Program))
+
+ t.Parallel()
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ for _, spec := range OpSpecs {
+ // Make sure our nonsense covers the ops
+ if !strings.Contains(nonsense[v], spec.Name) &&
+ !pseudoOp(spec.Name) && spec.Version <= v {
+ t.Errorf("v%d nonsense test should contain op %v", v, spec.Name)
+ }
+ }
+
+ ops := testProg(t, nonsense[v], v)
+ // check that compilation is stable over
+ // time. we must assemble to the same bytes
+ // this month that we did last month.
+ expectedBytes, _ := hex.DecodeString(compiled[v])
+ if bytes.Compare(expectedBytes, ops.Program) != 0 {
+ // this print is for convenience if
+ // the program has been changed. the
+ // hex string can be copy pasted back
+ // in as a new expected result.
+ t.Log(hex.EncodeToString(ops.Program))
+ }
+ require.Equal(t, expectedBytes, ops.Program)
+ })
}
- require.Equal(t, expectedBytes, ops.Program)
}
func TestAssembleAlias(t *testing.T) {
@@ -337,7 +356,7 @@ func testMatch(t *testing.T, actual, expected string) {
} else if strings.HasSuffix(expected, "...") {
require.Contains(t, "^"+actual, "^"+expected[:len(expected)-3])
} else {
- require.Equal(t, actual, expected)
+ require.Equal(t, expected, actual)
}
}
@@ -371,7 +390,7 @@ func testProg(t *testing.T, source string, ver uint64, expected ...expect) *OpSt
break
}
}
- require.NotNil(t, found)
+ require.NotNil(t, found, "No error on line %d", exp.l)
msg := found.Unwrap().Error()
testMatch(t, msg, exp.s)
}
@@ -395,11 +414,11 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
func TestAssembleTxna(t *testing.T) {
testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
- testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: Sender")
+ testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: \"Sender\"")
testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna group index beyond 255: 256")
- testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: Sender")
+ testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"")
testLine(t, "txn Accounts 0", 1, "txn expects one argument")
testLine(t, "txn Accounts 0 1", 2, "txn expects one or two arguments")
testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects two arguments")
@@ -409,20 +428,20 @@ func TestAssembleTxna(t *testing.T) {
testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects three arguments")
testLine(t, "gtxna a Accounts 0", AssemblerMaxVersion, "strconv.ParseUint...")
testLine(t, "gtxna 0 Accounts a", AssemblerMaxVersion, "strconv.ParseUint...")
- testLine(t, "txn ABC", 2, "txn unknown field: ABC")
- testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: ABC")
+ testLine(t, "txn ABC", 2, "txn unknown field: \"ABC\"")
+ testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: \"ABC\"")
testLine(t, "gtxn a ABC", 2, "strconv.ParseUint...")
- testLine(t, "txn Accounts", AssemblerMaxVersion, "found array field Accounts in txn op")
- testLine(t, "txn Accounts", 1, "found array field Accounts in txn op")
+ testLine(t, "txn Accounts", AssemblerMaxVersion, "found array field \"Accounts\" in txn op")
+ testLine(t, "txn Accounts", 1, "found array field \"Accounts\" in txn op")
testLine(t, "txn Accounts 0", AssemblerMaxVersion, "")
- testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "found array field Accounts in gtxn op")
- testLine(t, "gtxn 0 Accounts", 1, "found array field Accounts in gtxn op")
+ testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "found array field \"Accounts\" in gtxn op")
+ testLine(t, "gtxn 0 Accounts", 1, "found array field \"Accounts\" in gtxn op")
testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "")
}
func TestAssembleGlobal(t *testing.T) {
testLine(t, "global", AssemblerMaxVersion, "global expects one argument")
- testLine(t, "global a", AssemblerMaxVersion, "global unknown field: a")
+ testLine(t, "global a", AssemblerMaxVersion, "global unknown field: \"a\"")
}
func TestAssembleDefault(t *testing.T) {
@@ -758,9 +777,14 @@ func TestAssembleRejectNegJump(t *testing.T) {
int 1
bnz wat
int 2`
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
+ for v := uint64(1); v < backBranchEnabledVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ testProg(t, source, v, expect{3, "label \"wat\" is a back reference..."})
+ })
+ }
+ for v := uint64(backBranchEnabledVersion); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- testProg(t, source, v, expect{3, "label wat is before reference but only forward jumps are allowed"})
+ testProg(t, source, v)
})
}
}
@@ -796,7 +820,7 @@ bnz nowhere
int 2`
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- testProg(t, source, v, expect{2, "reference to undefined label nowhere"})
+ testProg(t, source, v, expect{2, "reference to undefined label \"nowhere\""})
})
}
}
@@ -826,28 +850,31 @@ int 2`
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
testProg(t, source, v,
- expect{2, "reference to undefined label nowhere"},
- expect{4, "txn unknown field: XYZ"})
+ expect{2, "reference to undefined label \"nowhere\""},
+ expect{4, "txn unknown field: \"XYZ\""})
})
}
}
func TestAssembleDisassemble(t *testing.T) {
// Specifically constructed program text that should be recreated by Disassemble()
- // TODO: disassemble to int/byte psuedo-ops instead of raw intcblock/bytecblock/intc/bytec
t.Parallel()
- text := fmt.Sprintf(`// version %d
+ text := fmt.Sprintf(`#pragma version %d
intcblock 0 1 2 3 4 5
-bytecblock 0xcafed00d 0x1337 0x2001 0xdeadbeef 0x70077007
-intc_1
-intc_0
+bytecblock 0xcafed00d 0x1337 0x68656c6c6f 0xdeadbeef 0x70077007 0x0102030405060708091011121314151617181920212223242526272829303132
+bytec_2 // "hello"
+pop
+bytec 5 // addr AEBAGBAFAYDQQCIQCEJBGFAVCYLRQGJAEERCGJBFEYTSQKJQGEZHVJ5ZZY
+pop
+intc_1 // 1
+intc_0 // 0
+
-intc 4
+intc 4 // 4
*
-bytec_1
-bytec_0
+bytec_1 // 0x1337
+bytec_0 // 0xcafed00d
==
-bytec 4
+bytec 4 // 0x70077007
len
+
arg_0
@@ -944,7 +971,7 @@ gtxn 12 Fee
func TestAssembleDisassembleCycle(t *testing.T) {
// Test that disassembly re-assembles to the same program bytes.
- // It disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes.
+ // Disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes.
t.Parallel()
tests := map[uint64]string{
@@ -955,24 +982,56 @@ func TestAssembleDisassembleCycle(t *testing.T) {
// This confirms that each program compiles to the same bytes
// (except the leading version indicator), when compiled under
- // original and max versions. That doesn't *have* to be true,
- // as we can introduce optimizations in later versions that
- // change the bytecode emitted. But currently it is, so we
- // test it for now to catch any suprises.
+ // original version, unspecified version (so it should pick up
+ // the pragma) and current version with pragma removed. That
+ // doesn't *have* to be true, as we can introduce
+ // optimizations in later versions that change the bytecode
+ // emitted. But currently it is, so we test it for now to
+ // catch any surprises.
for v, source := range tests {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, source, v)
t2, err := Disassemble(ops.Program)
require.NoError(t, err)
- ops2 := testProg(t, t2, AssemblerMaxVersion)
- if err != nil {
- t.Log(t2)
- }
- require.Equal(t, ops.Program[1:], ops2.Program[1:])
+ none := testProg(t, t2, assemblerNoVersion)
+ require.Equal(t, ops.Program[1:], none.Program[1:])
+ t3 := "// " + t2 // This comments out the #pragma version
+ current := testProg(t, t3, AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1:], current.Program[1:])
})
}
}
+func TestConstantDisassembly(t *testing.T) {
+ t.Parallel()
+
+ ops := testProg(t, "int 47", AssemblerMaxVersion)
+ out, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, out, "// 47")
+
+ ops = testProg(t, "byte \"john\"", AssemblerMaxVersion)
+ out, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, out, "// \"john\"")
+
+ ops = testProg(t, "byte \"!&~\"", AssemblerMaxVersion)
+ out, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, out, "// \"!&~\"")
+
+ ops = testProg(t, "byte 0x010720", AssemblerMaxVersion)
+ out, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, out, "// 0x010720")
+
+ ops = testProg(t, "addr AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", AssemblerMaxVersion)
+ out, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, out, "// addr AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
+
+}
+
func TestAssembleDisassembleErrors(t *testing.T) {
t.Parallel()
@@ -1103,75 +1162,126 @@ int 1
func TestAssembleAsset(t *testing.T) {
t.Parallel()
-
- testLine(t, "asset_holding_get ABC 1", AssemblerMaxVersion, "asset_holding_get expects one argument")
- testLine(t, "asset_holding_get ABC", AssemblerMaxVersion, "asset_holding_get unknown arg: ABC")
- testLine(t, "asset_params_get ABC 1", AssemblerMaxVersion, "asset_params_get expects one argument")
- testLine(t, "asset_params_get ABC", AssemblerMaxVersion, "asset_params_get unknown arg: ABC")
+ introduction := OpsByName[LogicVersion]["asset_holding_get"].Version
+ for v := introduction; v <= AssemblerMaxVersion; v++ {
+ testProg(t, "asset_holding_get ABC 1", v,
+ expect{1, "asset_holding_get arg 1..."})
+ testProg(t, "int 1; asset_holding_get ABC 1", v,
+ expect{2, "asset_holding_get arg 0..."})
+ testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
+ expect{3, "asset_holding_get expects one argument"})
+ testProg(t, "int 1; int 1; asset_holding_get ABC", v,
+ expect{3, "asset_holding_get unknown arg: \"ABC\""})
+
+ testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
+ expect{2, "asset_params_get arg 0 wanted type uint64..."})
+
+ testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects one argument")
+ testLine(t, "asset_params_get ABC", v, "asset_params_get unknown arg: \"ABC\"")
+ }
}
func TestDisassembleSingleOp(t *testing.T) {
t.Parallel()
- // test ensures no double arg_0 entries in disassembly listing
- sample := fmt.Sprintf("// version %d\narg_0\n", AssemblerMaxVersion)
- ops, err := AssembleStringWithVersion(sample, AssemblerMaxVersion)
- require.NoError(t, err)
- require.Equal(t, 2, len(ops.Program))
+
+ for v := uint64(1); v <= AssemblerMaxVersion; v++ {
+ // test ensures no double arg_0 entries in disassembly listing
+ sample := fmt.Sprintf("#pragma version %d\narg_0\n", v)
+ ops, err := AssembleStringWithVersion(sample, v)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(ops.Program))
+ disassembled, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Equal(t, sample, disassembled)
+ }
+}
+
+func TestDisassembleInt(t *testing.T) {
+ t.Parallel()
+ txnSample := fmt.Sprintf("#pragma version %d\nint 17\nint 27\nint 37\nint 47\nint 5\n", AssemblerMaxVersion)
+ ops := testProg(t, txnSample, AssemblerMaxVersion)
disassembled, err := Disassemble(ops.Program)
require.NoError(t, err)
- require.Equal(t, sample, disassembled)
+ // Would be nice to check that these appear in the
+ // disassembled output in the right order, but I don't want to
+ // hardcode checks that they are in certain intc slots.
+ require.Contains(t, disassembled, "// 17")
+ require.Contains(t, disassembled, "// 27")
+ require.Contains(t, disassembled, "// 37")
+ require.Contains(t, disassembled, "// 47")
+ require.Contains(t, disassembled, "// 5")
}
func TestDisassembleTxna(t *testing.T) {
t.Parallel()
- // check txn and txna are properly disassembled
- txnSample := fmt.Sprintf("// version %d\ntxn Sender\n", AssemblerMaxVersion)
- ops, err := AssembleStringWithVersion(txnSample, AssemblerMaxVersion)
- require.NoError(t, err)
- disassembled, err := Disassemble(ops.Program)
- require.NoError(t, err)
- require.Equal(t, txnSample, disassembled)
+ // txn was 1, but this tests both
+ introduction := OpsByName[LogicVersion]["gtxna"].Version
+ for v := introduction; v <= AssemblerMaxVersion; v++ {
+ // check txn and txna are properly disassembled
+ txnSample := fmt.Sprintf("#pragma version %d\ntxn Sender\n", v)
+ ops := testProg(t, txnSample, v)
+ disassembled, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Equal(t, txnSample, disassembled)
- txnaSample := fmt.Sprintf("// version %d\ntxna Accounts 0\n", AssemblerMaxVersion)
- ops, err = AssembleStringWithVersion(txnaSample, AssemblerMaxVersion)
- require.NoError(t, err)
- disassembled, err = Disassemble(ops.Program)
- require.NoError(t, err)
- require.Equal(t, txnaSample, disassembled)
+ txnaSample := fmt.Sprintf("#pragma version %d\ntxna Accounts 0\n", v)
+ ops = testProg(t, txnaSample, v)
+ disassembled, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Equal(t, txnaSample, disassembled)
- txnSample2 := fmt.Sprintf("// version %d\ntxn Accounts 0\n", AssemblerMaxVersion)
- ops, err = AssembleStringWithVersion(txnSample2, AssemblerMaxVersion)
- require.NoError(t, err)
- disassembled, err = Disassemble(ops.Program)
- require.NoError(t, err)
- // compare with txnaSample, not txnSample2
- require.Equal(t, txnaSample, disassembled)
+ txnSample2 := fmt.Sprintf("#pragma version %d\ntxn Accounts 0\n", v)
+ ops = testProg(t, txnSample2, v)
+ disassembled, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ // compare with txnaSample, not txnSample2
+ require.Equal(t, txnaSample, disassembled)
+ }
}
func TestDisassembleGtxna(t *testing.T) {
t.Parallel()
// check gtxn and gtxna are properly disassembled
- gtxnSample := fmt.Sprintf("// version %d\ngtxn 0 Sender\n", AssemblerMaxVersion)
- ops, err := AssembleStringWithVersion(gtxnSample, AssemblerMaxVersion)
- require.NoError(t, err)
- disassembled, err := Disassemble(ops.Program)
- require.NoError(t, err)
- require.Equal(t, gtxnSample, disassembled)
- gtxnaSample := fmt.Sprintf("// version %d\ngtxna 0 Accounts 0\n", AssemblerMaxVersion)
- ops, err = AssembleStringWithVersion(gtxnaSample, AssemblerMaxVersion)
+ introduction := OpsByName[LogicVersion]["gtxna"].Version
+ for v := introduction; v <= AssemblerMaxVersion; v++ {
+ gtxnSample := fmt.Sprintf("#pragma version %d\ngtxn 0 Sender\n", v)
+ ops := testProg(t, gtxnSample, v)
+ disassembled, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Equal(t, gtxnSample, disassembled)
+
+ gtxnaSample := fmt.Sprintf("#pragma version %d\ngtxna 0 Accounts 0\n", v)
+ ops = testProg(t, gtxnaSample, v)
+ disassembled, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Equal(t, gtxnaSample, disassembled)
+
+ gtxnSample2 := fmt.Sprintf("#pragma version %d\ngtxn 0 Accounts 0\n", v)
+ ops = testProg(t, gtxnSample2, v)
+ disassembled, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ // compare with gtxnaSample, not gtxnSample2
+ require.Equal(t, gtxnaSample, disassembled)
+ }
+}
+
+func TestDisassemblePushConst(t *testing.T) {
+ t.Parallel()
+ // check pushint and pushbytes are properly disassembled
+ intSample := fmt.Sprintf("#pragma version %d\npushint 1\n", AssemblerMaxVersion)
+ ops, err := AssembleStringWithVersion(intSample, AssemblerMaxVersion)
require.NoError(t, err)
- disassembled, err = Disassemble(ops.Program)
+ disassembled, err := Disassemble(ops.Program)
require.NoError(t, err)
- require.Equal(t, gtxnaSample, disassembled)
+ require.Equal(t, intSample, disassembled)
- gtxnSample2 := fmt.Sprintf("// version %d\ngtxn 0 Accounts 0\n", AssemblerMaxVersion)
- ops, err = AssembleStringWithVersion(gtxnSample2, AssemblerMaxVersion)
+ bytesSample := fmt.Sprintf("#pragma version %d\npushbytes 0x01\n", AssemblerMaxVersion)
+ ops, err = AssembleStringWithVersion(bytesSample, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err = Disassemble(ops.Program)
require.NoError(t, err)
- // comapre with gtxnaSample, not gtxnSample2
- require.Equal(t, gtxnaSample, disassembled)
+ require.Equal(t, bytesSample, disassembled)
}
func TestDisassembleLastLabel(t *testing.T) {
@@ -1180,9 +1290,9 @@ func TestDisassembleLastLabel(t *testing.T) {
// starting from TEAL v2 branching to the last line are legal
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- source := fmt.Sprintf(`// version %d
+ source := fmt.Sprintf(`#pragma version %d
intcblock 1
-intc_0
+intc_0 // 1
bnz label1
label1:
`, v)
@@ -1542,8 +1652,8 @@ func TestErrShortBytecblock(t *testing.T) {
var cx evalContext
cx.program = ops.Program
- checkIntConstBlock(&cx)
- require.Equal(t, cx.err, errShortIntcblock)
+ err = checkIntConstBlock(&cx)
+ require.Equal(t, err, errShortIntcblock)
}
func TestBranchAssemblyTypeCheck(t *testing.T) {
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 2b3c1cb56..5c6ada563 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -267,9 +267,9 @@ func TestBackwardCompatTEALv1(t *testing.T) {
require.NoError(t, err)
require.Equal(t, program, ops.Program)
// ensure the old program is the same as a new one except TEAL version byte
- ops, err = AssembleStringWithVersion(sourceTEALv1, AssemblerMaxVersion)
+ opsV2, err := AssembleStringWithVersion(sourceTEALv1, 2)
require.NoError(t, err)
- require.Equal(t, program[1:], ops.Program[1:])
+ require.Equal(t, program[1:], opsV2.Program[1:])
sig := c.Sign(Msg{
ProgramHash: crypto.HashObj(Program(program)),
@@ -285,14 +285,22 @@ func TestBackwardCompatTEALv1(t *testing.T) {
txn.Txn.RekeyTo = basics.Address{} // RekeyTo not allowed in TEAL v1
sb := strings.Builder{}
- ep := defaultEvalParams(&sb, &txn)
+ ep := defaultEvalParamsWithVersion(&sb, &txn, 1)
ep.TxnGroup = txgroup
// ensure v1 program runs well on latest TEAL evaluator
require.Equal(t, uint8(1), program[0])
- cost, err := Check(program, ep)
+
+ // Cost should stay exactly 2140
+ ep.Proto.LogicSigMaxCost = 2139
+ err = Check(program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "static cost")
+
+ ep.Proto.LogicSigMaxCost = 2140
+ err = Check(program, ep)
require.NoError(t, err)
- require.Equal(t, 2140, cost)
+
pass, err := Eval(program, ep)
if err != nil || !pass {
t.Log(hex.EncodeToString(program))
@@ -301,12 +309,19 @@ func TestBackwardCompatTEALv1(t *testing.T) {
require.NoError(t, err)
require.True(t, pass)
- cost2, err := Check(ops.Program, ep)
+ // Costs for v2 should be higher because of hash opcode cost changes
+ ep2 := defaultEvalParamsWithVersion(&sb, &txn, 2)
+ ep2.TxnGroup = txgroup
+ ep2.Proto.LogicSigMaxCost = 2307
+ err = Check(opsV2.Program, ep2)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "static cost")
+
+ ep2.Proto.LogicSigMaxCost = 2308
+ err = Check(opsV2.Program, ep2)
require.NoError(t, err)
- // Costs for v2 should be higher because of hash opcode cost changes
- require.Equal(t, 2308, cost2)
- pass, err = Eval(ops.Program, ep)
+ pass, err = Eval(opsV2.Program, ep2)
if err != nil || !pass {
t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
@@ -315,6 +330,8 @@ func TestBackwardCompatTEALv1(t *testing.T) {
require.True(t, pass)
// ensure v0 program runs well on latest TEAL evaluator
+ ep = defaultEvalParams(&sb, &txn)
+ ep.TxnGroup = txgroup
program[0] = 0
sig = c.Sign(Msg{
ProgramHash: crypto.HashObj(Program(program)),
@@ -322,12 +339,35 @@ func TestBackwardCompatTEALv1(t *testing.T) {
})
txn.Lsig.Logic = program
txn.Lsig.Args = [][]byte{data[:], sig[:], pk[:], txn.Txn.Sender[:], txn.Txn.Note}
- cost, err = Check(program, ep)
+
+ // Cost remains the same, because v0 does not get dynamic treatment
+ ep.Proto.LogicSigMaxCost = 2139
+ err = Check(program, ep)
+ require.Error(t, err)
+
+ ep.Proto.LogicSigMaxCost = 2140
+ err = Check(program, ep)
require.NoError(t, err)
- require.Equal(t, 2140, cost)
pass, err = Eval(program, ep)
require.NoError(t, err)
require.True(t, pass)
+
+ // But in v4, cost is now dynamic and exactly 1 less than v2/v3,
+ // because bnz skips "err". It's caught during Eval
+ program[0] = 4
+ ep.Proto.LogicSigMaxCost = 2306
+ err = Check(program, ep)
+ require.NoError(t, err)
+ _, err = Eval(program, ep)
+ require.Error(t, err)
+
+ ep.Proto.LogicSigMaxCost = 2307
+ err = Check(program, ep)
+ require.NoError(t, err)
+ pass, err = Eval(program, ep)
+ require.NoError(t, err)
+ require.True(t, pass)
+
}
// ensure v2 fields error on pre TEAL v2 logicsig version
@@ -418,7 +458,7 @@ func TestBackwardCompatTxnFields(t *testing.T) {
if _, ok := txnaFieldSpecByField[fs.field]; ok {
parts := strings.Split(text, " ")
op := parts[0]
- asmError = fmt.Sprintf("found array field %s in %s op", field, op)
+ asmError = fmt.Sprintf("found array field %#v in %s op", field, op)
}
// check assembler fails if version before introduction
testLine(t, text, assemblerNoVersion, asmError)
@@ -481,15 +521,15 @@ bnz done
done:`
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, assemblerNoVersion, expect{4, "label done is too far away"})
+ testProg(t, source, assemblerNoVersion, expect{4, "label \"done\" is too far away"})
})
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, 0, expect{4, "label done is too far away"})
+ testProg(t, source, 0, expect{4, "label \"done\" is too far away"})
})
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, 1, expect{4, "label done is too far away"})
+ testProg(t, source, 1, expect{4, "label \"done\" is too far away"})
})
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 4da698f34..a64f92511 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -22,197 +22,177 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-type stringString struct {
- a string
- b string
-}
-
-func stringStringListToMap(they []stringString) map[string]string {
- out := make(map[string]string, len(they))
- for _, v := range they {
- out[v.a] = v.b
- }
- return out
-}
-
// short description of every op
-var opDocList = []stringString{
- {"err", "Error. Panic immediately. This is primarily a fencepost against accidental zero bytes getting compiled into programs."},
- {"sha256", "SHA256 hash of value X, yields [32]byte"},
- {"keccak256", "Keccak256 hash of value X, yields [32]byte"},
- {"sha512_256", "SHA512_256 hash of value X, yields [32]byte"},
- {"ed25519verify", "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey => {0 or 1}"},
- {"+", "A plus B. Panic on overflow."},
- {"-", "A minus B. Panic if B > A."},
- {"/", "A divided by B. Panic if B == 0."},
- {"*", "A times B. Panic on overflow."},
- {"<", "A less than B => {0 or 1}"},
- {">", "A greater than B => {0 or 1}"},
- {"<=", "A less than or equal to B => {0 or 1}"},
- {">=", "A greater than or equal to B => {0 or 1}"},
- {"&&", "A is not zero and B is not zero => {0 or 1}"},
- {"||", "A is not zero or B is not zero => {0 or 1}"},
- {"==", "A is equal to B => {0 or 1}"},
- {"!=", "A is not equal to B => {0 or 1}"},
- {"!", "X == 0 yields 1; else 0"},
- {"len", "yields length of byte value X"},
- {"itob", "converts uint64 X to big endian bytes"},
- {"btoi", "converts bytes X as big endian to uint64"},
- {"%", "A modulo B. Panic if B == 0."},
- {"|", "A bitwise-or B"},
- {"&", "A bitwise-and B"},
- {"^", "A bitwise-xor B"},
- {"~", "bitwise invert value X"},
- {"mulw", "A times B out to 128-bit long result as low (top) and high uint64 values on the stack"},
- {"addw", "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack"},
- {"intcblock", "prepare block of uint64 constants for use by intc"},
- {"intc", "push Ith constant from intcblock to stack"},
- {"intc_0", "push constant 0 from intcblock to stack"},
- {"intc_1", "push constant 1 from intcblock to stack"},
- {"intc_2", "push constant 2 from intcblock to stack"},
- {"intc_3", "push constant 3 from intcblock to stack"},
- {"pushint", "push immediate UINT to the stack as an integer"},
- {"bytecblock", "prepare block of byte-array constants for use by bytec"},
- {"bytec", "push Ith constant from bytecblock to stack"},
- {"bytec_0", "push constant 0 from bytecblock to stack"},
- {"bytec_1", "push constant 1 from bytecblock to stack"},
- {"bytec_2", "push constant 2 from bytecblock to stack"},
- {"bytec_3", "push constant 3 from bytecblock to stack"},
- {"pushbytes", "push the following program bytes to the stack"},
- {"arg", "push Nth LogicSig argument to stack"},
- {"arg_0", "push LogicSig argument 0 to stack"},
- {"arg_1", "push LogicSig argument 1 to stack"},
- {"arg_2", "push LogicSig argument 2 to stack"},
- {"arg_3", "push LogicSig argument 3 to stack"},
- {"txn", "push field F of current transaction to stack"},
- {"gtxn", "push field F of the Tth transaction in the current group"},
- {"gtxns", "push field F of the Ath transaction in the current group"},
- {"txna", "push Ith value of the array field F of the current transaction"},
- {"gtxna", "push Ith value of the array field F from the Tth transaction in the current group"},
- {"gtxnsa", "push Ith value of the array field F from the Ath transaction in the current group"},
- {"global", "push value from globals to stack"},
- {"load", "copy a value from scratch space to the stack"},
- {"store", "pop a value from the stack and store to scratch space"},
- {"bnz", "branch to TARGET if value X is not zero"},
- {"bz", "branch to TARGET if value X is zero"},
- {"b", "branch unconditionally to TARGET"},
- {"return", "use last value on stack as success value; end"},
- {"pop", "discard value X from stack"},
- {"dup", "duplicate last value on stack"},
- {"dup2", "duplicate two last values on stack: A, B -> A, B, A, B"},
- {"dig", "push the Nth value from the top of the stack. dig 0 is equivalent to dup"},
- {"swap", "swaps two last values on stack: A, B -> B, A"},
- {"select", "selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A)"},
- {"concat", "pop two byte-arrays A and B and join them, push the result"},
- {"substring", "pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails"},
- {"substring3", "pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails"},
- {"getbit", "pop a target A (integer or byte-array), and index B. Push the Bth bit of A."},
- {"setbit", "pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result"},
- {"getbyte", "pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer"},
- {"setbyte", "pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result"},
- {"balance", "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted."},
- {"min_balance", "get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes."},
- {"app_opted_in", "check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1}"},
- {"app_local_get", "read from account specified by Txn.Accounts[A] from local state of the current application key B => value"},
- {"app_local_get_ex", "read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1]"},
- {"app_global_get", "read key A from global state of a current application => value"},
- {"app_global_get_ex", "read from application Txn.ForeignApps[A] global state key B => [*... stack*, value, 0 or 1]. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app"},
- {"app_local_put", "write to account specified by Txn.Accounts[A] to local state of a current application key B with value C"},
- {"app_global_put", "write key A and value B to global state of the current application"},
- {"app_local_del", "delete from account specified by Txn.Accounts[A] local state key B of the current application"},
- {"app_global_del", "delete key A from a global state of the current application"},
- {"asset_holding_get", "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}"},
- {"asset_params_get", "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}"},
- {"assert", "immediately fail unless value X is a non-zero number"},
+var opDocByName = map[string]string{
+ "err": "Error. Panic immediately. This is primarily a fencepost against accidental zero bytes getting compiled into programs.",
+ "sha256": "SHA256 hash of value X, yields [32]byte",
+ "keccak256": "Keccak256 hash of value X, yields [32]byte",
+ "sha512_256": "SHA512_256 hash of value X, yields [32]byte",
+ "ed25519verify": "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey => {0 or 1}",
+ "+": "A plus B. Panic on overflow.",
+ "-": "A minus B. Panic if B > A.",
+ "/": "A divided by B. Panic if B == 0.",
+ "*": "A times B. Panic on overflow.",
+ "<": "A less than B => {0 or 1}",
+ ">": "A greater than B => {0 or 1}",
+ "<=": "A less than or equal to B => {0 or 1}",
+ ">=": "A greater than or equal to B => {0 or 1}",
+ "&&": "A is not zero and B is not zero => {0 or 1}",
+ "||": "A is not zero or B is not zero => {0 or 1}",
+ "==": "A is equal to B => {0 or 1}",
+ "!=": "A is not equal to B => {0 or 1}",
+ "!": "X == 0 yields 1; else 0",
+ "len": "yields length of byte value X",
+ "itob": "converts uint64 X to big endian bytes",
+ "btoi": "converts bytes X as big endian to uint64",
+ "%": "A modulo B. Panic if B == 0.",
+ "|": "A bitwise-or B",
+ "&": "A bitwise-and B",
+ "^": "A bitwise-xor B",
+ "~": "bitwise invert value X",
+ "mulw": "A times B out to 128-bit long result as low (top) and high uint64 values on the stack",
+ "addw": "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack",
+ "divw": "Pop four uint64 values. The deepest two are interpreted as a uint128 dividend (deepest value is high word), the top two are interpreted as a uint128 divisor. Four uint64 values are pushed to the stack. The deepest two are the quotient (deeper value is the high uint64). The top two are the remainder, low bits on top.",
+ "intcblock": "prepare block of uint64 constants for use by intc",
+ "intc": "push Ith constant from intcblock to stack",
+ "intc_0": "push constant 0 from intcblock to stack",
+ "intc_1": "push constant 1 from intcblock to stack",
+ "intc_2": "push constant 2 from intcblock to stack",
+ "intc_3": "push constant 3 from intcblock to stack",
+ "pushint": "push immediate UINT to the stack as an integer",
+ "bytecblock": "prepare block of byte-array constants for use by bytec",
+ "bytec": "push Ith constant from bytecblock to stack",
+ "bytec_0": "push constant 0 from bytecblock to stack",
+ "bytec_1": "push constant 1 from bytecblock to stack",
+ "bytec_2": "push constant 2 from bytecblock to stack",
+ "bytec_3": "push constant 3 from bytecblock to stack",
+ "pushbytes": "push the following program bytes to the stack",
+ "arg": "push Nth LogicSig argument to stack",
+ "arg_0": "push LogicSig argument 0 to stack",
+ "arg_1": "push LogicSig argument 1 to stack",
+ "arg_2": "push LogicSig argument 2 to stack",
+ "arg_3": "push LogicSig argument 3 to stack",
+ "txn": "push field F of current transaction to stack",
+ "gtxn": "push field F of the Tth transaction in the current group",
+ "gtxns": "push field F of the Ath transaction in the current group",
+ "txna": "push Ith value of the array field F of the current transaction",
+ "gtxna": "push Ith value of the array field F from the Tth transaction in the current group",
+ "gtxnsa": "push Ith value of the array field F from the Ath transaction in the current group",
+ "global": "push value from globals to stack",
+ "load": "copy a value from scratch space to the stack",
+ "store": "pop a value from the stack and store to scratch space",
+ "bnz": "branch to TARGET if value X is not zero",
+ "bz": "branch to TARGET if value X is zero",
+ "b": "branch unconditionally to TARGET",
+ "return": "use last value on stack as success value; end",
+ "pop": "discard value X from stack",
+ "dup": "duplicate last value on stack",
+ "dup2": "duplicate two last values on stack: A, B -> A, B, A, B",
+ "dig": "push the Nth value from the top of the stack. dig 0 is equivalent to dup",
+ "swap": "swaps two last values on stack: A, B -> B, A",
+ "select": "selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A)",
+ "concat": "pop two byte-arrays A and B and join them, push the result",
+ "substring": "pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails",
+ "substring3": "pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails",
+ "getbit": "pop a target A (integer or byte-array), and index B. Push the Bth bit of A.",
+ "setbit": "pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result",
+ "getbyte": "pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer",
+ "setbyte": "pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result",
+ "balance": "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
+ "min_balance": "get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "app_opted_in": "check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1}",
+ "app_local_get": "read from account specified by Txn.Accounts[A] from local state of the current application key B => value",
+ "app_local_get_ex": "read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1]",
+ "app_global_get": "read key A from global state of a current application => value",
+ "app_global_get_ex": "read from application Txn.ForeignApps[A] global state key B => [*... stack*, value, 0 or 1]. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app",
+ "app_local_put": "write to account specified by Txn.Accounts[A] to local state of a current application key B with value C",
+ "app_global_put": "write key A and value B to global state of the current application",
+ "app_local_del": "delete from account specified by Txn.Accounts[A] local state key B of the current application",
+ "app_global_del": "delete key A from a global state of the current application",
+ "asset_holding_get": "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}",
+ "asset_params_get": "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}",
+ "assert": "immediately fail unless value X is a non-zero number",
+ "callsub": "branch unconditionally to TARGET, saving the next instruction on the call stack",
+ "retsub": "pop the top instruction from the call stack and branch to it",
}
-var opDocByName map[string]string
-
// OpDoc returns a description of the op
func OpDoc(opName string) string {
- if opDocByName == nil {
- opDocByName = stringStringListToMap(opDocList)
- }
return opDocByName[opName]
}
-// notes on immediate bytes following the opcode
-var opcodeImmediateNoteList = []stringString{
- {"intcblock", "{varuint length} [{varuint value}, ...]"},
- {"intc", "{uint8 int constant index}"},
- {"pushint", "{varuint int}"},
- {"bytecblock", "{varuint length} [({varuint value length} bytes), ...]"},
- {"bytec", "{uint8 byte constant index}"},
- {"pushbytes", "{varuint length} {bytes}"},
- {"arg", "{uint8 arg index N}"},
- {"txn", "{uint8 transaction field index}"},
- {"gtxn", "{uint8 transaction group index} {uint8 transaction field index}"},
- {"gtxns", "{uint8 transaction field index}"},
- {"txna", "{uint8 transaction field index} {uint8 transaction field array index}"},
- {"gtxna", "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}"},
- {"gtxnsa", "{uint8 transaction field index} {uint8 transaction field array index}"},
- {"global", "{uint8 global field index}"},
- {"bnz", "{0..0x7fff forward branch offset, big endian}"},
- {"bz", "{0..0x7fff forward branch offset, big endian}"},
- {"b", "{0..0x7fff forward branch offset, big endian}"},
- {"load", "{uint8 position in scratch space to load from}"},
- {"store", "{uint8 position in scratch space to store to}"},
- {"substring", "{uint8 start position} {uint8 end position}"},
- {"dig", "{uint8 depth}"},
- {"asset_holding_get", "{uint8 asset holding field index}"},
- {"asset_params_get", "{uint8 asset params field index}"},
+var opcodeImmediateNotes = map[string]string{
+ "intcblock": "{varuint length} [{varuint value}, ...]",
+ "intc": "{uint8 int constant index}",
+ "pushint": "{varuint int}",
+ "bytecblock": "{varuint length} [({varuint value length} bytes), ...]",
+ "bytec": "{uint8 byte constant index}",
+ "pushbytes": "{varuint length} {bytes}",
+ "arg": "{uint8 arg index N}",
+ "txn": "{uint8 transaction field index}",
+ "gtxn": "{uint8 transaction group index} {uint8 transaction field index}",
+ "gtxns": "{uint8 transaction field index}",
+ "txna": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "gtxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "gtxnsa": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "global": "{uint8 global field index}",
+ "bnz": "{int16 branch offset, big endian. (negative offsets are illegal before v4)}",
+ "bz": "{int16 branch offset, big endian. (negative offsets are illegal before v4)}",
+ "b": "{int16 branch offset, big endian. (negative offsets are illegal before v4)}",
+ "load": "{uint8 position in scratch space to load from}",
+ "store": "{uint8 position in scratch space to store to}",
+ "substring": "{uint8 start position} {uint8 end position}",
+ "dig": "{uint8 depth}",
+ "asset_holding_get": "{uint8 asset holding field index}",
+ "asset_params_get": "{uint8 asset params field index}",
}
-var opcodeImmediateNotes map[string]string
// OpImmediateNote returns a short string about immediate data which follows the op byte
func OpImmediateNote(opName string) string {
- if opcodeImmediateNotes == nil {
- opcodeImmediateNotes = stringStringListToMap(opcodeImmediateNoteList)
- }
return opcodeImmediateNotes[opName]
}
// further documentation on the function of the opcode
-var opDocExtraList = []stringString{
- {"ed25519verify", "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack."},
- {"bnz", "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)"},
- {"bz", "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`."},
- {"b", "See `bnz` for details on how branches work. `b` always jumps to the offset."},
- {"intcblock", "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script."},
- {"bytecblock", "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script."},
- {"*", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`."},
- {"+", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`."},
- {"txn", "FirstValidTime causes the program to fail. The field is reserved for future use."},
- {"gtxn", "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`."},
- {"gtxns", "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction."},
- {"btoi", "`btoi` panics if the input is longer than 8 bytes."},
- {"concat", "`concat` panics if the result would be greater than 4096 bytes."},
- {"getbit", "see explanation of bit ordering in setbit"},
- {"setbit", "bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins in the first bytes of a byte-string (as seen in getbyte and substring). Setting bits 0 through 11 to 1 in a 4 byte-array of 0s yields `byte 0xfff00000`"},
- {"app_opted_in", "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise."},
- {"app_local_get", "params: account index, state key. Return: value. The value is zero (of type uint64) if the key does not exist."},
- {"app_local_get_ex", "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist."},
- {"app_global_get_ex", "params: application index, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist."},
- {"app_global_get", "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist."},
- {"app_local_put", "params: account index, state key, value."},
- {"app_local_del", "params: account index, state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)"},
- {"app_global_del", "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)"},
- {"asset_holding_get", "params: account index, asset id. Return: did_exist flag (1 if exist and 0 otherwise), value."},
- {"asset_params_get", "params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 otherwise), value."},
+var opDocExtras = map[string]string{
+ "ed25519verify": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
+ "bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are limited to forward branches only, 0-0x7fff until v4. v4 treats offset as a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)",
+ "bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
+ "b": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
+ "callsub": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "retsub": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
+ "bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
+ "*": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.",
+ "+": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.",
+ "/": "`divw` is available to divide the two-element values produced by `mulw` and `addw`.",
+ "txn": "FirstValidTime causes the program to fail. The field is reserved for future use.",
+ "gtxn": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.",
+ "gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.",
+ "btoi": "`btoi` panics if the input is longer than 8 bytes.",
+ "concat": "`concat` panics if the result would be greater than 4096 bytes.",
+ "pushbytes": "pushbytes args are not added to the bytecblock during assembly processes",
+ "pushint": "pushint args are not added to the intcblock during assembly processes",
+ "getbit": "see explanation of bit ordering in setbit",
+ "setbit": "bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins in the first bytes of a byte-string (as seen in getbyte and substring). Setting bits 0 through 11 to 1 in a 4 byte-array of 0s yields `byte 0xfff00000`",
+ "app_opted_in": "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise.",
+ "app_local_get": "params: account index, state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
+ "app_local_get_ex": "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "app_global_get_ex": "params: application index, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "app_global_get": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
+ "app_local_put": "params: account index, state key, value.",
+ "app_local_del": "params: account index, state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)",
+ "app_global_del": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)",
+ "asset_holding_get": "params: account index, asset id. Return: did_exist flag (1 if exist and 0 otherwise), value.",
+ "asset_params_get": "params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 otherwise), value.",
}
-var opDocExtras map[string]string
-
// OpDocExtra returns extra documentation text about an op
func OpDocExtra(opName string) string {
- if opDocExtras == nil {
- opDocExtras = stringStringListToMap(opDocExtraList)
- }
return opDocExtras[opName]
}
// OpGroup is a grouping of ops for documentation purposes.
-// e.g. "Arithmetic", ["+", "-", ...]
+// e.g. "Arithmetic", ["+", "-", ...]
type OpGroup struct {
GroupName string
Ops []string
@@ -220,56 +200,55 @@ type OpGroup struct {
// OpGroupList is groupings of ops for documentation purposes.
var OpGroupList = []OpGroup{
- {"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "getbit", "setbit", "getbyte", "setbyte", "concat", "substring", "substring3"}},
+ {"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "getbit", "setbit", "getbyte", "setbyte", "concat", "substring", "substring3"}},
{"Loading Values", []string{"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "txn", "gtxn", "txna", "gtxna", "gtxns", "gtxnsa", "global", "load", "store"}},
- {"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "swap", "select", "assert"}},
+ {"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "swap", "select", "assert", "callsub", "retsub"}},
{"State Access", []string{"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}},
}
-// OpAllCosts returns an array of the relative cost score for an op by version.
-// If all the costs are the same the array is single entry
-// otherwise it has costs by op version
-func OpAllCosts(opName string) []int {
- cost := OpsByName[LogicVersion][opName].Details.Cost
- costs := make([]int, LogicVersion+1)
- isDifferent := false
+// OpCost indicates the cost of an operation over the range of
+// LogicVersions from From to To.
+type OpCost struct {
+ From int
+ To int
+ Cost int
+}
+
+// OpAllCosts returns an array of the cost score for an op by version.
+// Each entry indicates the cost over a range of versions, so if the
+// cost has remained constant, there is only one result, otherwise
+// each entry shows the cost for a consecutive range of versions,
+// inclusive.
+func OpAllCosts(opName string) []OpCost {
+ var costs []OpCost
for v := 1; v <= LogicVersion; v++ {
- costs[v] = OpsByName[v][opName].Details.Cost
- if costs[v] > 0 && costs[v] != cost {
- isDifferent = true
+ cost := OpsByName[v][opName].Details.Cost
+ if cost == 0 {
+ continue
+ }
+ if costs == nil || cost != costs[len(costs)-1].Cost {
+ costs = append(costs, OpCost{v, v, cost})
+ } else {
+ costs[len(costs)-1].To = v
}
- }
- if !isDifferent {
- return []int{cost}
}
return costs
}
-// see assembler.go TxnTypeNames
-// also used to parse symbolic constants for `int`
-var typeEnumDescriptions = []stringString{
- {string(protocol.UnknownTx), "Unknown type. Invalid transaction"},
- {string(protocol.PaymentTx), "Payment"},
- {string(protocol.KeyRegistrationTx), "KeyRegistration"},
- {string(protocol.AssetConfigTx), "AssetConfig"},
- {string(protocol.AssetTransferTx), "AssetTransfer"},
- {string(protocol.AssetFreezeTx), "AssetFreeze"},
- {string(protocol.ApplicationCallTx), "ApplicationCall"},
-}
-
-// TypeNameDescription returns extra description about a low level protocol transaction Type string
-func TypeNameDescription(typeName string) string {
- for _, ted := range typeEnumDescriptions {
- if typeName == ted.a {
- return ted.b
- }
- }
- return "invalid type name"
+// TypeNameDescriptions contains extra description about a low level
+// protocol transaction Type string, and provide a friendlier type
+// constant name in assembler.
+var TypeNameDescriptions = map[string]string{
+ string(protocol.UnknownTx): "Unknown type. Invalid transaction",
+ string(protocol.PaymentTx): "Payment",
+ string(protocol.KeyRegistrationTx): "KeyRegistration",
+ string(protocol.AssetConfigTx): "AssetConfig",
+ string(protocol.AssetTransferTx): "AssetTransfer",
+ string(protocol.AssetFreezeTx): "AssetFreeze",
+ string(protocol.ApplicationCallTx): "ApplicationCall",
}
-// see assembler.go TxnTypeNames
-// also used to parse symbolic constants for `int`
var onCompletionDescriptions = map[OnCompletionConstType]string{
NoOp: "Only execute the `ApprovalProgram` associated with this application ID, with no additional effects.",
OptIn: "Before executing the `ApprovalProgram`, allocate local state for this application into the sender's account data.",
@@ -291,85 +270,79 @@ func OnCompletionDescription(value uint64) string {
// OnCompletionPreamble describes what the OnCompletion constants represent.
const OnCompletionPreamble = "An application transaction must indicate the action to be taken following the execution of its approvalProgram or clearStateProgram. The constants below describe the available actions."
-var txnFieldDocList = []stringString{
- {"Sender", "32 byte address"},
- {"Fee", "micro-Algos"},
- {"FirstValid", "round number"},
- {"FirstValidTime", "Causes program to fail; reserved for future use"},
- {"LastValid", "round number"},
- {"Receiver", "32 byte address"},
- {"Amount", "micro-Algos"},
- {"CloseRemainderTo", "32 byte address"},
- {"VotePK", "32 byte address"},
- {"SelectionPK", "32 byte address"},
- //{"VoteFirst", ""},
- //{"VoteLast", ""},
- {"TypeEnum", "See table below"},
- {"XferAsset", "Asset ID"},
- {"AssetAmount", "value in Asset's units"},
- {"AssetSender", "32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset."},
- {"AssetReceiver", "32 byte address"},
- {"AssetCloseTo", "32 byte address"},
- {"GroupIndex", "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1"},
- {"TxID", "The computed ID for this transaction. 32 bytes."},
- {"ApplicationID", "ApplicationID from ApplicationCall transaction"},
- {"OnCompletion", "ApplicationCall transaction on completion action"},
- {"ApplicationArgs", "Arguments passed to the application in the ApplicationCall transaction"},
- {"NumAppArgs", "Number of ApplicationArgs"},
- {"Accounts", "Accounts listed in the ApplicationCall transaction"},
- {"NumAccounts", "Number of Accounts"},
- {"Assets", "Foreign Assets listed in the ApplicationCall transaction"},
- {"NumAssets", "Number of Assets"},
- {"Applications", "Foreign Apps listed in the ApplicationCall transaction"},
- {"NumApplications", "Number of Applications"},
- {"GlobalNumUint", "Number of global state integers in ApplicationCall"},
- {"GlobalNumByteSlice", "Number of global state byteslices in ApplicationCall"},
- {"LocalNumUint", "Number of local state integers in ApplicationCall"},
- {"LocalNumByteSlice", "Number of local state byteslices in ApplicationCall"},
- {"ApprovalProgram", "Approval program"},
- {"ClearStateProgram", "Clear state program"},
- {"RekeyTo", "32 byte Sender's new AuthAddr"},
- {"ConfigAsset", "Asset ID in asset config transaction"},
- {"ConfigAssetTotal", "Total number of units of this asset created"},
- {"ConfigAssetDecimals", "Number of digits to display after the decimal place when displaying the asset"},
- {"ConfigAssetDefaultFrozen", "Whether the asset's slots are frozen by default or not, 0 or 1"},
- {"ConfigAssetUnitName", "Unit name of the asset"},
- {"ConfigAssetName", "The asset name"},
- {"ConfigAssetURL", "URL"},
- {"ConfigAssetMetadataHash", "32 byte commitment to some unspecified asset metadata"},
- {"ConfigAssetManager", "32 byte address"},
- {"ConfigAssetReserve", "32 byte address"},
- {"ConfigAssetFreeze", "32 byte address"},
- {"ConfigAssetClawback", "32 byte address"},
- {"FreezeAsset", "Asset ID being frozen or un-frozen"},
- {"FreezeAssetAccount", "32 byte address of the account whose asset slot is being frozen or un-frozen"},
- {"FreezeAssetFrozen", "The new frozen value, 0 or 1"},
+var txnFieldDocs = map[string]string{
+ "Sender": "32 byte address",
+ "Fee": "micro-Algos",
+ "FirstValid": "round number",
+ "FirstValidTime": "Causes program to fail; reserved for future use",
+ "LastValid": "round number",
+ "Receiver": "32 byte address",
+ "Amount": "micro-Algos",
+ "CloseRemainderTo": "32 byte address",
+ "VotePK": "32 byte address",
+ "SelectionPK": "32 byte address",
+ //"VoteFirst": "",
+ //"VoteLast": "",
+ "TypeEnum": "See table below",
+ "XferAsset": "Asset ID",
+ "AssetAmount": "value in Asset's units",
+ "AssetSender": "32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset.",
+ "AssetReceiver": "32 byte address",
+ "AssetCloseTo": "32 byte address",
+ "GroupIndex": "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1",
+ "TxID": "The computed ID for this transaction. 32 bytes.",
+ "ApplicationID": "ApplicationID from ApplicationCall transaction",
+ "OnCompletion": "ApplicationCall transaction on completion action",
+ "ApplicationArgs": "Arguments passed to the application in the ApplicationCall transaction",
+ "NumAppArgs": "Number of ApplicationArgs",
+ "Accounts": "Accounts listed in the ApplicationCall transaction",
+ "NumAccounts": "Number of Accounts",
+ "Assets": "Foreign Assets listed in the ApplicationCall transaction",
+ "NumAssets": "Number of Assets",
+ "Applications": "Foreign Apps listed in the ApplicationCall transaction",
+ "NumApplications": "Number of Applications",
+ "GlobalNumUint": "Number of global state integers in ApplicationCall",
+ "GlobalNumByteSlice": "Number of global state byteslices in ApplicationCall",
+ "LocalNumUint": "Number of local state integers in ApplicationCall",
+ "LocalNumByteSlice": "Number of local state byteslices in ApplicationCall",
+ "ApprovalProgram": "Approval program",
+ "ClearStateProgram": "Clear state program",
+ "RekeyTo": "32 byte Sender's new AuthAddr",
+ "ConfigAsset": "Asset ID in asset config transaction",
+ "ConfigAssetTotal": "Total number of units of this asset created",
+ "ConfigAssetDecimals": "Number of digits to display after the decimal place when displaying the asset",
+ "ConfigAssetDefaultFrozen": "Whether the asset's slots are frozen by default or not, 0 or 1",
+ "ConfigAssetUnitName": "Unit name of the asset",
+ "ConfigAssetName": "The asset name",
+ "ConfigAssetURL": "URL",
+ "ConfigAssetMetadataHash": "32 byte commitment to some unspecified asset metadata",
+ "ConfigAssetManager": "32 byte address",
+ "ConfigAssetReserve": "32 byte address",
+ "ConfigAssetFreeze": "32 byte address",
+ "ConfigAssetClawback": "32 byte address",
+ "FreezeAsset": "Asset ID being frozen or un-frozen",
+ "FreezeAssetAccount": "32 byte address of the account whose asset slot is being frozen or un-frozen",
+ "FreezeAssetFrozen": "The new frozen value, 0 or 1",
}
-// TxnFieldDocs are notes on fields available by `txn` and `gtxn`
-var txnFieldDocs map[string]string
-
// TxnFieldDocs are notes on fields available by `txn` and `gtxn` with extra versioning info if any
func TxnFieldDocs() map[string]string {
return fieldsDocWithExtra(txnFieldDocs, txnFieldSpecByName)
}
-var globalFieldDocList = []stringString{
- {"MinTxnFee", "micro Algos"},
- {"MinBalance", "micro Algos"},
- {"MaxTxnLife", "rounds"},
- {"ZeroAddress", "32 byte address of all zero bytes"},
- {"GroupSize", "Number of transactions in this atomic transaction group. At least 1"},
- {"LogicSigVersion", "Maximum supported TEAL version"},
- {"Round", "Current round number"},
- {"LatestTimestamp", "Last confirmed block UNIX timestamp. Fails if negative"},
- {"CurrentApplicationID", "ID of current application executing. Fails if no such application is executing"},
- {"CreatorAddress", "Address of the creator of the current application. Fails if no such application is executing"},
+var globalFieldDocs = map[string]string{
+ "MinTxnFee": "micro Algos",
+ "MinBalance": "micro Algos",
+ "MaxTxnLife": "rounds",
+ "ZeroAddress": "32 byte address of all zero bytes",
+ "GroupSize": "Number of transactions in this atomic transaction group. At least 1",
+ "LogicSigVersion": "Maximum supported TEAL version",
+ "Round": "Current round number",
+ "LatestTimestamp": "Last confirmed block UNIX timestamp. Fails if negative",
+ "CurrentApplicationID": "ID of current application executing. Fails if no such application is executing",
+ "CreatorAddress": "Address of the creator of the current application. Fails if no such application is executing",
}
-// globalFieldDocs are notes on fields available in `global`
-var globalFieldDocs map[string]string
-
// GlobalFieldDocs are notes on fields available in `global` with extra versioning info if any
func GlobalFieldDocs() map[string]string {
return fieldsDocWithExtra(globalFieldDocs, globalFieldSpecByName)
@@ -398,34 +371,23 @@ func fieldsDocWithExtra(source map[string]string, ex extractor) map[string]strin
return result
}
-var assetHoldingFieldDocList = []stringString{
- {"AssetBalance", "Amount of the asset unit held by this account"},
- {"AssetFrozen", "Is the asset frozen or not"},
-}
-
// AssetHoldingFieldDocs are notes on fields available in `asset_holding_get`
-var AssetHoldingFieldDocs map[string]string
-
-var assetParamsFieldDocList = []stringString{
- {"AssetTotal", "Total number of units of this asset"},
- {"AssetDecimals", "See AssetParams.Decimals"},
- {"AssetDefaultFrozen", "Frozen by default or not"},
- {"AssetUnitName", "Asset unit name"},
- {"AssetName", "Asset name"},
- {"AssetURL", "URL with additional info about the asset"},
- {"AssetMetadataHash", "Arbitrary commitment"},
- {"AssetManager", "Manager commitment"},
- {"AssetReserve", "Reserve address"},
- {"AssetFreeze", "Freeze address"},
- {"AssetClawback", "Clawback address"},
+var AssetHoldingFieldDocs = map[string]string{
+ "AssetBalance": "Amount of the asset unit held by this account",
+ "AssetFrozen": "Is the asset frozen or not",
}
// AssetParamsFieldDocs are notes on fields available in `asset_params_get`
-var AssetParamsFieldDocs map[string]string
-
-func init() {
- txnFieldDocs = stringStringListToMap(txnFieldDocList)
- globalFieldDocs = stringStringListToMap(globalFieldDocList)
- AssetHoldingFieldDocs = stringStringListToMap(assetHoldingFieldDocList)
- AssetParamsFieldDocs = stringStringListToMap(assetParamsFieldDocList)
+var AssetParamsFieldDocs = map[string]string{
+ "AssetTotal": "Total number of units of this asset",
+ "AssetDecimals": "See AssetParams.Decimals",
+ "AssetDefaultFrozen": "Frozen by default or not",
+ "AssetUnitName": "Asset unit name",
+ "AssetName": "Asset name",
+ "AssetURL": "URL with additional info about the asset",
+ "AssetMetadataHash": "Arbitrary commitment",
+ "AssetManager": "Manager commitment",
+ "AssetReserve": "Reserve address",
+ "AssetFreeze": "Freeze address",
+ "AssetClawback": "Clawback address",
}
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index 75568d2e0..c0aeb4574 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -27,16 +27,16 @@ func TestOpDocs(t *testing.T) {
for _, op := range OpSpecs {
opsSeen[op.Name] = false
}
- for _, od := range opDocList {
- _, exists := opsSeen[od.a]
+ for name := range opDocByName {
+ _, exists := opsSeen[name]
if !exists {
- t.Errorf("error: doc for op %#v that does not exist in OpSpecs", od.a)
+ t.Errorf("error: doc for op %#v that does not exist in OpSpecs", name)
}
- opsSeen[od.a] = true
+ opsSeen[name] = true
}
for op, seen := range opsSeen {
if !seen {
- t.Errorf("error: doc for op %#v missing", op)
+ t.Errorf("error: doc for op %#v missing from opDocByName", op)
}
}
}
@@ -86,25 +86,16 @@ func TestOpDocExtra(t *testing.T) {
func TestOpAllCosts(t *testing.T) {
a := OpAllCosts("+")
- require.Equal(t, 1, len(a))
- require.Equal(t, 1, a[0])
+ require.Len(t, a, 1)
+ require.Equal(t, 1, a[0].Cost)
a = OpAllCosts("sha256")
- require.True(t, len(a) > 1)
- for v := 1; v <= LogicVersion; v++ {
- require.True(t, a[v] > 1)
+ require.Len(t, a, 2)
+ for _, cost := range a {
+ require.True(t, cost.Cost > 1)
}
}
-func TestTypeNameDescription(t *testing.T) {
- require.Equal(t, len(TxnTypeNames), len(typeEnumDescriptions))
- for i, a := range TxnTypeNames {
- b := TypeNameDescription(a)
- require.Equal(t, b, typeEnumDescriptions[i].b)
- }
- require.Equal(t, "invalid type name", TypeNameDescription("invalid type name"))
-}
-
func TestOnCompletionDescription(t *testing.T) {
desc := OnCompletionDescription(0)
require.Equal(t, "Only execute the `ApprovalProgram` associated with this application ID, with no additional effects.", desc)
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 3a5fe8e67..91557d3bc 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -28,7 +28,6 @@ import (
"math"
"math/big"
"runtime"
- "sort"
"strings"
"golang.org/x/crypto/sha3"
@@ -176,7 +175,7 @@ type EvalParams struct {
}
type opEvalFunc func(cx *evalContext)
-type opCheckFunc func(cx *evalContext) int
+type opCheckFunc func(cx *evalContext) error
type runMode uint64
@@ -208,6 +207,13 @@ func (r runMode) String() string {
return "Unknown"
}
+func (ep EvalParams) budget() int {
+ if ep.runModeFlags == runModeSignature {
+ return int(ep.Proto.LogicSigMaxCost)
+ }
+ return ep.Proto.MaxAppProgramCost
+}
+
func (ep EvalParams) log() logging.Logger {
if ep.Logger != nil {
return ep.Logger
@@ -218,22 +224,28 @@ func (ep EvalParams) log() logging.Logger {
type evalContext struct {
EvalParams
- stack []stackValue
- program []byte // txn.Lsig.Logic ?
- pc int
- nextpc int
- err error
- intc []uint64
- bytec [][]byte
- version uint64
- scratch [256]stackValue
-
- stepCount int
- cost int
-
- // Ordered set of pc values that a branch could go to.
- // If Check pc skips a target, the source branch was invalid!
- branchTargets []int
+ stack []stackValue
+ callstack []int
+ program []byte // txn.Lsig.Logic ?
+ pc int
+ nextpc int
+ err error
+ intc []uint64
+ bytec [][]byte
+ version uint64
+ scratch [256]stackValue
+
+ cost int // cost incurred so far
+
+ // Set of PC values that branches we've seen so far might
+ // jump to. So, if checkStep() skips one, that branch is trying
+ // to jump into the middle of a multibyte instruction
+ branchTargets map[int]bool
+
+ // Set of PC values that we have begun a checkStep() with. So
+ // if a back jump is going to a value that isn't here, it's
+ // jumping into the middle of a multibyte instruction.
+ instructionStarts map[int]bool
programHashCached crypto.Digest
txidCache map[int]transactions.Txid
@@ -297,11 +309,7 @@ func EvalStateful(program []byte, params EvalParams) (pass bool, err error) {
var cx evalContext
cx.EvalParams = params
cx.runModeFlags = runModeApplication
-
- // Evaluate the program
- pass, err = eval(program, &cx)
-
- return pass, err
+ return eval(program, &cx)
}
// Eval checks to see if a transaction passes logic
@@ -376,8 +384,8 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) {
minVersion = *cx.EvalParams.MinTealVersion
}
if version < minVersion {
- err = fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version)
- return
+ cx.err = fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version)
+ return false, cx.err
}
cx.version = version
@@ -400,10 +408,6 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) {
}
cx.step()
- cx.stepCount++
- if cx.stepCount > len(cx.program) {
- return false, errLoopDetected
- }
}
if cx.err != nil {
if cx.Trace != nil {
@@ -427,26 +431,29 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) {
return cx.stack[0].Uint != 0, nil
}
-// CheckStateful should be faster than EvalStateful.
-// Returns 'cost' which is an estimate of relative execution time.
-func CheckStateful(program []byte, params EvalParams) (cost int, err error) {
+// CheckStateful should be faster than EvalStateful. It can perform
+// static checks and reject programs that are invalid. Prior to v4,
+// these static checks include a cost estimate that must be low enough
+// (controlled by params.Proto).
+func CheckStateful(program []byte, params EvalParams) error {
params.runModeFlags = runModeApplication
return check(program, params)
}
-// Check should be faster than Eval.
-// Returns 'cost' which is an estimate of relative execution time.
-func Check(program []byte, params EvalParams) (cost int, err error) {
+// Check should be faster than Eval. It can perform static checks and
+// reject programs that are invalid. Prior to v4, these static checks
+// include a cost estimate that must be low enough (controlled by
+// params.Proto).
+func Check(program []byte, params EvalParams) error {
params.runModeFlags = runModeSignature
return check(program, params)
}
-func check(program []byte, params EvalParams) (cost int, err error) {
+func check(program []byte, params EvalParams) (err error) {
defer func() {
if x := recover(); x != nil {
buf := make([]byte, 16*1024)
stlen := runtime.Stack(buf, false)
- cost = 0
errstr := string(buf[:stlen])
if params.Trace != nil {
if sb, ok := params.Trace.(*strings.Builder); ok {
@@ -458,22 +465,17 @@ func check(program []byte, params EvalParams) (cost int, err error) {
}
}()
if (params.Proto == nil) || (params.Proto.LogicSigVersion == 0) {
- err = errLogicSigNotSupported
- return
+ return errLogicSigNotSupported
}
- var cx evalContext
version, vlen := binary.Uvarint(program)
if vlen <= 0 {
- cx.err = errors.New("invalid version")
- return 0, cx.err
+ return errors.New("invalid version")
}
if version > EvalMaxVersion {
- err = fmt.Errorf("program version %d greater than max supported version %d", version, EvalMaxVersion)
- return
+ return fmt.Errorf("program version %d greater than max supported version %d", version, EvalMaxVersion)
}
if version > params.Proto.LogicSigVersion {
- err = fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion)
- return
+ return fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion)
}
var minVersion uint64
@@ -483,31 +485,41 @@ func check(program []byte, params EvalParams) (cost int, err error) {
minVersion = *params.MinTealVersion
}
if version < minVersion {
- err = fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version)
- return
+ return fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version)
}
+ var cx evalContext
cx.version = version
cx.pc = vlen
cx.EvalParams = params
cx.program = program
+ cx.branchTargets = make(map[int]bool)
+ cx.instructionStarts = make(map[int]bool)
+ maxCost := params.budget()
+ if version >= backBranchEnabledVersion {
+ maxCost = math.MaxInt32
+ }
+ staticCost := 0
for cx.pc < len(cx.program) {
prevpc := cx.pc
- cost += cx.checkStep()
- if cx.err != nil {
- break
+ stepCost, err := cx.checkStep()
+ if err != nil {
+ return fmt.Errorf("pc=%3d %w", cx.pc, err)
+ }
+ staticCost += stepCost
+ if staticCost > maxCost {
+ return fmt.Errorf("pc=%3d static cost budget of %d exceeded", cx.pc, maxCost)
}
if cx.pc <= prevpc {
- err = fmt.Errorf("pc did not advance, stuck at %d", cx.pc)
- return
+ // Recall, this is advancing through opcodes
+ // without evaluation. It always goes forward,
+ // even if we're in v4 and the jump would go
+ // back.
+ return fmt.Errorf("pc did not advance, stuck at %d", cx.pc)
}
}
- if cx.err != nil {
- err = fmt.Errorf("%3d %s", cx.pc, cx.err)
- return
- }
- return
+ return nil
}
func opCompat(expected, got StackType) bool {
@@ -568,13 +580,34 @@ func (cx *evalContext) step() {
return
}
cx.cost += deets.Cost
+ if cx.cost > cx.budget() {
+ cx.err = fmt.Errorf("pc=%3d dynamic cost budget of %d exceeded, executing %s", cx.pc, cx.budget(), spec.Name)
+ return
+ }
spec.op(cx)
if cx.Trace != nil {
- immArgsString := " "
- if spec.Name != "bnz" {
- for i := 1; i < spec.Details.Size; i++ {
- immArgsString += fmt.Sprintf("0x%02x ", cx.program[cx.pc+i])
+ // This code used to do a little disassembly on its
+ // own, but then it missed out on some nuances like
+ // getting the field names instead of constants in the
+ // txn opcodes. To get them, we conjure up a
+ // disassembleState from the current execution state,
+ // and use the existing disassembly routines. It
+ // feels a little funny to make a disassembleState
+ // right here, rather than build it as we go, or
+ // perhaps we could have an interface that allows
+ // disassembly to use the cx directly. But for now,
+ // we don't want to worry about the disassembly
+ // routines mucking about in the execution context
+ // (changing the pc, for example) and this gives a big
+ // improvement in dryrun readability
+ dstate := &disassembleState{program: cx.program, pc: cx.pc, numericTargets: true, intc: cx.intc, bytec: cx.bytec}
+ var sourceLine string
+ sourceLine, err := spec.dis(dstate, spec)
+ if err != nil {
+ if cx.err == nil { // don't override an error from evaluation
+ cx.err = err
}
+ return
}
var stackString string
if len(cx.stack) == 0 {
@@ -596,7 +629,7 @@ func (cx *evalContext) step() {
}
}
}
- fmt.Fprintf(cx.Trace, "%3d %s%s=> %s\n", cx.pc, spec.Name, immArgsString, stackString)
+ fmt.Fprintf(cx.Trace, "%3d %s => %s\n", cx.pc, sourceLine, stackString)
}
if cx.err != nil {
return
@@ -614,25 +647,26 @@ func (cx *evalContext) step() {
}
}
-func (cx *evalContext) checkStep() (cost int) {
+func (cx *evalContext) checkStep() (int, error) {
+ cx.instructionStarts[cx.pc] = true
opcode := cx.program[cx.pc]
spec := &opsByOpcode[cx.version][opcode]
if spec.op == nil {
- cx.err = fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
- return 1
+ return 0, fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
}
if (cx.runModeFlags & spec.Modes) == 0 {
- cx.err = fmt.Errorf("%s not allowed in current mode", spec.Name)
- return
+ return 0, fmt.Errorf("%s not allowed in current mode", spec.Name)
}
deets := spec.Details
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
- cx.err = fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
- return 1
+ return 0, fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
}
prevpc := cx.pc
if deets.checkFunc != nil {
- cost = deets.checkFunc(cx)
+ err := deets.checkFunc(cx)
+ if err != nil {
+ return 0, err
+ }
if cx.nextpc != 0 {
cx.pc = cx.nextpc
cx.nextpc = 0
@@ -640,26 +674,19 @@ func (cx *evalContext) checkStep() (cost int) {
cx.pc += deets.Size
}
} else {
- cost = deets.Cost
cx.pc += deets.Size
}
if cx.Trace != nil {
fmt.Fprintf(cx.Trace, "%3d %s\n", prevpc, spec.Name)
}
- if cx.err != nil {
- return 1
- }
- if len(cx.branchTargets) > 0 {
- if cx.branchTargets[0] < cx.pc {
- cx.err = fmt.Errorf("branch target at %d not an aligned instruction", cx.branchTargets[0])
- return 1
- }
- for len(cx.branchTargets) > 0 && cx.branchTargets[0] == cx.pc {
- // checks okay
- cx.branchTargets = cx.branchTargets[1:]
+ if cx.err == nil {
+ for pc := prevpc + 1; pc < cx.pc; pc++ {
+ if _, ok := cx.branchTargets[pc]; ok {
+ return 0, fmt.Errorf("branch target %d is not an aligned instruction", pc)
+ }
}
}
- return
+ return deets.Cost, nil
}
func opErr(cx *evalContext) {
@@ -757,6 +784,41 @@ func opAddw(cx *evalContext) {
cx.stack[last].Uint = sum
}
+func uint128(hi uint64, lo uint64) *big.Int {
+ whole := new(big.Int).SetUint64(hi)
+ whole.Lsh(whole, 64)
+ whole.Add(whole, new(big.Int).SetUint64(lo))
+ return whole
+}
+
+func opDivwImpl(hiNum, loNum, hiDen, loDen uint64) (hiQuo uint64, loQuo uint64, hiRem uint64, loRem uint64) {
+ dividend := uint128(hiNum, loNum)
+ divisor := uint128(hiDen, loDen)
+
+ quo, rem := new(big.Int).QuoRem(dividend, divisor, new(big.Int))
+ return new(big.Int).Rsh(quo, 64).Uint64(),
+ quo.Uint64(),
+ new(big.Int).Rsh(rem, 64).Uint64(),
+ rem.Uint64()
+}
+
+func opDivw(cx *evalContext) {
+ loDen := len(cx.stack) - 1
+ hiDen := loDen - 1
+ if cx.stack[loDen].Uint == 0 && cx.stack[hiDen].Uint == 0 {
+ cx.err = errors.New("/ 0")
+ return
+ }
+ loNum := loDen - 2
+ hiNum := loDen - 3
+ hiQuo, loQuo, hiRem, loRem :=
+ opDivwImpl(cx.stack[hiNum].Uint, cx.stack[loNum].Uint, cx.stack[hiDen].Uint, cx.stack[loDen].Uint)
+ cx.stack[hiNum].Uint = hiQuo
+ cx.stack[loNum].Uint = loQuo
+ cx.stack[hiDen].Uint = hiRem
+ cx.stack[loDen].Uint = loRem
+}
+
func opMinus(cx *evalContext) {
last := len(cx.stack) - 1
prev := last - 1
@@ -1134,29 +1196,41 @@ func opArg3(cx *evalContext) {
opArgN(cx, 3)
}
-// checks any branch that is {op} {int16 be offset}
-func checkBranch(cx *evalContext) int {
- offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2])
- if offset > 0x7fff {
- cx.err = fmt.Errorf("branch offset %x too large", offset)
- return 1
+func branchTarget(cx *evalContext) (int, error) {
+ offset := int16(uint16(cx.program[cx.pc+1])<<8 | uint16(cx.program[cx.pc+2]))
+ if offset < 0 && cx.version < backBranchEnabledVersion {
+ return 0, fmt.Errorf("negative branch offset %x", offset)
}
- cx.nextpc = cx.pc + 3
- target := cx.nextpc + int(offset)
+ target := cx.pc + 3 + int(offset)
var branchTooFar bool
if cx.version >= 2 {
// branching to exactly the end of the program (target == len(cx.program)), the next pc after the last instruction, is okay and ends normally
- branchTooFar = target > len(cx.program)
+ branchTooFar = target > len(cx.program) || target < 0
} else {
- branchTooFar = target >= len(cx.program)
+ branchTooFar = target >= len(cx.program) || target < 0
}
if branchTooFar {
- cx.err = errors.New("branch target beyond end of program")
- return 1
+ return 0, errors.New("branch target beyond end of program")
+ }
+
+ return target, nil
+}
+
+// checks any branch that is {op} {int16 be offset}
+func checkBranch(cx *evalContext) error {
+ cx.nextpc = cx.pc + 3
+ target, err := branchTarget(cx)
+ if err != nil {
+ return err
+ }
+ if target < cx.nextpc {
+ // If a branch goes backwards, we should have already noted that an instruction began at that location.
+ if _, ok := cx.instructionStarts[target]; !ok {
+ return fmt.Errorf("back branch target %d is not an aligned instruction", target)
+ }
}
- cx.branchTargets = append(cx.branchTargets, target)
- sort.Ints(cx.branchTargets)
- return 1
+ cx.branchTargets[target] = true
+ return nil
}
func opBnz(cx *evalContext) {
last := len(cx.stack) - 1
@@ -1164,12 +1238,12 @@ func opBnz(cx *evalContext) {
isNonZero := cx.stack[last].Uint != 0
cx.stack = cx.stack[:last] // pop
if isNonZero {
- offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2])
- if offset > 0x7fff {
- cx.err = fmt.Errorf("bnz offset %x too large", offset)
+ target, err := branchTarget(cx)
+ if err != nil {
+ cx.err = err
return
}
- cx.nextpc += int(offset)
+ cx.nextpc = target
}
}
@@ -1179,22 +1253,38 @@ func opBz(cx *evalContext) {
isZero := cx.stack[last].Uint == 0
cx.stack = cx.stack[:last] // pop
if isZero {
- offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2])
- if offset > 0x7fff {
- cx.err = fmt.Errorf("bz offset %x too large", offset)
+ target, err := branchTarget(cx)
+ if err != nil {
+ cx.err = err
return
}
- cx.nextpc += int(offset)
+ cx.nextpc = target
}
}
func opB(cx *evalContext) {
- offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2])
- if offset > 0x7fff {
- cx.err = fmt.Errorf("b offset %x too large", offset)
+ target, err := branchTarget(cx)
+ if err != nil {
+ cx.err = err
+ return
+ }
+ cx.nextpc = target
+}
+
+func opCallSub(cx *evalContext) {
+ cx.callstack = append(cx.callstack, cx.pc+3)
+ opB(cx)
+}
+
+func opRetSub(cx *evalContext) {
+ top := len(cx.callstack) - 1
+ if top < 0 {
+ cx.err = errors.New("retsub with empty callstack")
return
}
- cx.nextpc = cx.pc + 3 + int(offset)
+ target := cx.callstack[top]
+ cx.callstack = cx.callstack[:top]
+ cx.nextpc = target
}
func opPop(cx *evalContext) {
@@ -1218,7 +1308,7 @@ func opDig(cx *evalContext) {
depth := int(uint(cx.program[cx.pc+1]))
idx := len(cx.stack) - 1 - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand dig
- // so we can't expect out stack to be prechecked.
+ // so we can't expect our stack to be prechecked.
if idx < 0 {
cx.err = fmt.Errorf("dig %d with stack size = %d", depth, len(cx.stack))
return
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index d5f262b3a..98c791a1e 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -542,13 +542,13 @@ pop
type desc struct {
source string
eval func([]byte, EvalParams) (bool, error)
- check func([]byte, EvalParams) (int, error)
+ check func([]byte, EvalParams) error
}
tests := map[runMode]desc{
runModeSignature: {
source: opcodesRunModeAny + opcodesRunModeSignature,
eval: func(program []byte, ep EvalParams) (bool, error) { return Eval(program, ep) },
- check: func(program []byte, ep EvalParams) (int, error) { return Check(program, ep) },
+ check: func(program []byte, ep EvalParams) error { return Check(program, ep) },
},
runModeApplication: {
source: opcodesRunModeAny + opcodesRunModeApplication,
@@ -556,7 +556,7 @@ pop
pass, err := EvalStateful(program, ep)
return pass, err
},
- check: func(program []byte, ep EvalParams) (int, error) { return CheckStateful(program, ep) },
+ check: func(program []byte, ep EvalParams) error { return CheckStateful(program, ep) },
},
}
@@ -600,7 +600,7 @@ pop
ep.TxnGroup = txgroup
ep.Ledger = ledger
ep.Txn.Txn.ApplicationID = 100
- _, err := test.check(ops.Program, ep)
+ err := test.check(ops.Program, ep)
require.NoError(t, err)
_, err = test.eval(ops.Program, ep)
if err != nil {
@@ -618,7 +618,7 @@ pop
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
ep := defaultEvalParams(nil, nil)
- _, err = test.check(ops.Program, ep)
+ err = test.check(ops.Program, ep)
require.NoError(t, err)
_, err = test.eval(ops.Program, ep)
require.Error(t, err)
@@ -640,7 +640,7 @@ pop
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
ep := defaultEvalParams(nil, nil)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.Error(t, err)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
@@ -666,7 +666,7 @@ pop
for _, source := range statefulOpcodeCalls {
ops := testProg(t, source, AssemblerMaxVersion)
ep := defaultEvalParams(nil, nil)
- _, err := Check(ops.Program, ep)
+ err := Check(ops.Program, ep)
require.Error(t, err)
_, err = Eval(ops.Program, ep)
require.Error(t, err)
@@ -713,9 +713,8 @@ int 177
==`
ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -743,9 +742,8 @@ int 13
txn.Txn.Sender: 13,
},
)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -755,9 +753,8 @@ func testApp(t *testing.T, program string, ep EvalParams, problems ...string) ba
ops := testProg(t, program, AssemblerMaxVersion)
sb := &strings.Builder{}
ep.Trace = sb
- cost, err := CheckStateful(ops.Program, ep)
+ err := CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
// we only use this to test stateful apps. While, I suppose
// it's *legal* to have an app with no stateful ops, this
@@ -1025,9 +1022,8 @@ byte 0x414c474f
ep := defaultEvalParams(nil, nil)
ep.Txn = &txn
ep.TxnGroup = txgroup
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "ledger not available")
@@ -1054,9 +1050,8 @@ byte 0x414c474f
ledger.applications[100].GlobalState[string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1242,9 +1237,8 @@ func TestAssets(t *testing.T) {
txn := makeSampleTxn()
ep := defaultEvalParams(nil, nil)
ep.Txn = &txn
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "ledger not available")
@@ -1263,9 +1257,8 @@ func TestAssets(t *testing.T) {
ops, err := AssembleStringWithVersion(assetsTestProgram, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, defaultEvalParams(nil, nil))
+ err = CheckStateful(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
txn := makeSampleTxn()
sb := strings.Builder{}
@@ -1291,9 +1284,8 @@ func TestAssets(t *testing.T) {
ep := defaultEvalParams(&sb, &txn)
ep.Ledger = ledger
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := EvalStateful(ops.Program, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -1319,9 +1311,8 @@ int 1
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
ledger.setHolding(txn.Txn.Sender, 55, 123, false)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1419,9 +1410,8 @@ int 1
require.NoError(t, err)
params.URL = ""
ledger.newAsset(txn.Txn.Sender, 55, params)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "cannot compare ([]byte == uint64)")
@@ -1487,9 +1477,8 @@ int 100
ep := defaultEvalParams(nil, nil)
ep.Txn = &txn
ep.Txn.Txn.ApplicationID = 100
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "ledger not available")
@@ -1586,9 +1575,8 @@ int 0x77
`
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1630,9 +1618,8 @@ int 0x77
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1793,9 +1780,8 @@ int 1
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1974,9 +1960,8 @@ int 0x77
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -2091,9 +2076,8 @@ byte 0x414c474f
require.NoError(t, err)
sb := strings.Builder{}
ep.Trace = &sb
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -2372,9 +2356,8 @@ int 1
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := EvalStateful(ops.Program, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -2409,9 +2392,8 @@ app_local_get_ex
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -2446,9 +2428,8 @@ app_local_put
`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -2479,9 +2460,8 @@ int 1
`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -2515,9 +2495,8 @@ int 1
`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -2551,9 +2530,8 @@ int 1
`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -2793,7 +2771,7 @@ int 1
require.NoError(t, err)
ep := defaultEvalParams(nil, nil)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
@@ -2822,7 +2800,7 @@ int 1
require.NoError(t, err)
ep := defaultEvalParams(nil, nil)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
@@ -2852,7 +2830,7 @@ int 42
require.NoError(t, err)
ep := defaultEvalParams(nil, nil)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 2efe54a29..74dcc251f 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -51,6 +51,7 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
return config.ConsensusParams{
LogicSigVersion: version,
LogicSigMaxCost: 20000,
+ MaxAppProgramCost: 700,
MaxAppKeyLen: 64,
MaxAppBytesValueLen: 64,
// These must be identical to keep an old backward compat test working
@@ -128,7 +129,7 @@ func TestMinTealVersionParamEvalCheck(t *testing.T) {
// set the teal program version to 1
binary.PutUvarint(program, 1)
- _, err := Check(program, params)
+ err := Check(program, params)
require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", appsEnabledVersion))
// If the param is read correctly, the eval should fail
@@ -197,7 +198,7 @@ func TestWrongProtoVersion(t *testing.T) {
proto.LogicSigVersion = 0
ep := defaultEvalParams(&sb, &txn)
ep.Proto = &proto
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "LogicSig not supported")
pass, err := Eval(ops.Program, ep)
@@ -232,9 +233,8 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=
txn.Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
sb := strings.Builder{}
ep := defaultEvalParams(&sb, &txn)
- cost, err := Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := Eval(ops.Program, ep)
require.True(t, pass)
require.NoError(t, err)
@@ -294,13 +294,12 @@ func TestTLHC(t *testing.T) {
sb := strings.Builder{}
block := bookkeeping.Block{}
ep := defaultEvalParams(&sb, &txn)
- cost, err := Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
if err != nil {
t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := Eval(ops.Program, ep)
if pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -536,34 +535,65 @@ int 1 // ret 1
`, 2)
}
+func TestUint128(t *testing.T) {
+ x := uint128(0, 3)
+ require.Equal(t, x.String(), "3")
+ x = uint128(0, 0)
+ require.Equal(t, x.String(), "0")
+ x = uint128(1, 3)
+ require.Equal(t, x.String(), "18446744073709551619")
+ x = uint128(1, 5)
+ require.Equal(t, x.String(), "18446744073709551621")
+ x = uint128(^uint64(0), ^uint64(0)) // maximum uint128 = 2^128-1
+ require.Equal(t, x.String(), "340282366920938463463374607431768211455")
+}
+
+func TestDivw(t *testing.T) {
+ t.Parallel()
+ // 2:0 / 1:0 == 2r0 == 0:2,0:0
+ testAccepts(t, `int 2; int 0; int 1; int 0; divw;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 2; ==; assert;
+ int 0; ==; assert; int 1`, 4)
+
+ // 2:0 / 0:1 == 2:0r0 == 2:0,0:0
+ testAccepts(t, `int 2; int 0; int 0; int 1; divw;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 2; ==; assert; int 1`, 4)
+
+ // 0:0 / 0:7 == 0r0
+ testAccepts(t, `int 0; int 0; int 0; int 7; divw;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 0; ==; assert; int 1`, 4)
+
+ // maxu64:maxu64 / maxu64:maxu64 == 1r0
+ testAccepts(t, `int 18446744073709551615; int 18446744073709551615; int 18446744073709551615; int 18446744073709551615;
+ divw;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 1; ==; assert;
+ int 0; ==; assert; int 1`, 4)
+
+ // 0:7777 / 1:0 == 0:0r7777 == 0:0,0:7777
+ testAccepts(t, `int 0; int 7777; int 1; int 0; divw;
+ int 7777; ==; assert;
+ int 0; ==; assert;
+ int 0; ==; assert;
+ int 0; ==; assert; int 1`, 4)
+
+ // 10:0 / 0:0 ==> panic
+ testPanics(t, `int 10; int 0; int 0; int 0; divw;
+ pop; pop; pop; pop; int 1`, 4)
+}
+
func TestDivZero(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0x111111111
-int 0
-/
-pop
-int 1`, v)
- require.NoError(t, err)
- sb := strings.Builder{}
- cost, err := Check(ops.Program, defaultEvalParams(&sb, nil))
- if err != nil {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.False(t, pass)
- require.Error(t, err)
- isNotPanic(t, err)
- })
- }
+ testPanics(t, "int 0x11; int 0; /; pop; int 1", 1)
}
func TestModZero(t *testing.T) {
@@ -576,9 +606,8 @@ int 0
pop
int 1`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -599,9 +628,8 @@ func TestErr(t *testing.T) {
ops, err := AssembleStringWithVersion(`err
int 1`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -629,9 +657,8 @@ int 2
int 4
==`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -653,9 +680,8 @@ int 0
pop`, v)
require.NoError(t, err)
sb := strings.Builder{}
- cost, err := Check(ops.Program, defaultEvalParams(&sb, nil))
+ err = Check(ops.Program, defaultEvalParams(&sb, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb = strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -676,9 +702,8 @@ func TestStackLeftover(t *testing.T) {
int 1`, v)
require.NoError(t, err)
sb := strings.Builder{}
- cost, err := Check(ops.Program, defaultEvalParams(&sb, nil))
+ err = Check(ops.Program, defaultEvalParams(&sb, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb = strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -699,9 +724,8 @@ func TestStackBytesLeftover(t *testing.T) {
ops, err := AssembleStringWithVersion(`byte 0x10101010`, v)
require.NoError(t, err)
sb := strings.Builder{}
- cost, err := Check(ops.Program, defaultEvalParams(&sb, nil))
+ err = Check(ops.Program, defaultEvalParams(&sb, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb = strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -724,9 +748,8 @@ int 1
pop
pop`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -747,9 +770,8 @@ func TestArgTooFar(t *testing.T) {
ops, err := AssembleStringWithVersion(`arg_1
btoi`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
@@ -772,9 +794,8 @@ func TestIntcTooFar(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops, err := AssembleStringWithVersion(`intc_1`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
@@ -798,9 +819,8 @@ func TestBytecTooFar(t *testing.T) {
ops, err := AssembleStringWithVersion(`bytec_1
btoi`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
@@ -820,9 +840,8 @@ btoi`, v)
func TestTxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x31, 0x7f}
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ err := Check(program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
var txn transactions.SignedTxn
txn.Lsig.Logic = program
@@ -857,9 +876,8 @@ func TestTxnBadField(t *testing.T) {
func TestGtxnBadIndex(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x1, 0x01}
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ err := Check(program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
var txn transactions.SignedTxn
txn.Lsig.Logic = program
@@ -881,9 +899,8 @@ func TestGtxnBadIndex(t *testing.T) {
func TestGtxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x0, 0x7f}
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ err := Check(program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
var txn transactions.SignedTxn
txn.Lsig.Logic = program
@@ -922,8 +939,7 @@ func TestGtxnBadField(t *testing.T) {
func TestGlobalBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x32, 0x7f}
- cost, err := Check(program, defaultEvalParams(nil, nil))
- require.True(t, cost < 1000)
+ err := Check(program, defaultEvalParams(nil, nil))
require.NoError(t, err) // Check does not validate opcode args
sb := strings.Builder{}
var txn transactions.SignedTxn
@@ -956,9 +972,8 @@ int 9
<
&&`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{
@@ -1029,13 +1044,17 @@ addr ` + testAddr + `
&&
`
+const globalV4TestProgram = globalV3TestProgram + `
+// No new globals in v4
+`
+
func TestGlobal(t *testing.T) {
t.Parallel()
type desc struct {
lastField GlobalField
program string
eval func([]byte, EvalParams) (bool, error)
- check func([]byte, EvalParams) (int, error)
+ check func([]byte, EvalParams) error
}
tests := map[uint64]desc{
0: {GroupSize, globalV1TestProgram, Eval, Check},
@@ -1048,6 +1067,10 @@ func TestGlobal(t *testing.T) {
CreatorAddress, globalV3TestProgram,
EvalStateful, CheckStateful,
},
+ 4: {
+ CreatorAddress, globalV4TestProgram,
+ EvalStateful, CheckStateful,
+ },
}
ledger := makeTestLedger(nil)
ledger.appID = 42
@@ -1066,9 +1089,8 @@ func TestGlobal(t *testing.T) {
}
}
ops := testProg(t, testProgram, v)
- cost, err := check(ops.Program, defaultEvalParams(nil, nil))
+ err := check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txgroup := make([]transactions.SignedTxn, 1)
@@ -1078,11 +1100,12 @@ func TestGlobal(t *testing.T) {
block.BlockHeader.Round = 999999
block.BlockHeader.TimeStamp = 2069
proto := config.ConsensusParams{
- MinTxnFee: 123,
- MinBalance: 1000000,
- MaxTxnLife: 999,
- LogicSigVersion: LogicVersion,
- LogicSigMaxCost: 20000,
+ MinTxnFee: 123,
+ MinBalance: 1000000,
+ MaxTxnLife: 999,
+ LogicSigVersion: LogicVersion,
+ LogicSigMaxCost: 20000,
+ MaxAppProgramCost: 700,
}
ep := defaultEvalParams(&sb, &txn)
ep.TxnGroup = txgroup
@@ -1134,9 +1157,8 @@ int %s
&&`, symbol, string(tt))
ops, err := AssembleStringWithVersion(text, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
var txn transactions.SignedTxn
txn.Txn.Type = tt
sb := strings.Builder{}
@@ -1540,9 +1562,8 @@ func TestTxn(t *testing.T) {
for v, source := range tests {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, source, v)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
txn := makeSampleTxn()
txn.Txn.ApprovalProgram = ops.Program
txn.Txn.ClearStateProgram = clearOps.Program
@@ -1625,13 +1646,12 @@ return
ops, err := AssembleStringWithVersion(cachedTxnProg, 2)
require.NoError(t, err)
sb := strings.Builder{}
- cost, err := Check(ops.Program, defaultEvalParams(&sb, nil))
+ err = Check(ops.Program, defaultEvalParams(&sb, nil))
if err != nil {
t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.NoError(t, err)
- require.True(t, cost < 1000)
txn := makeSampleTxn()
txgroup := makeSampleTxnGroup(txn)
txn.Lsig.Logic = ops.Program
@@ -1767,13 +1787,12 @@ func testLogic(t *testing.T, program string, v uint64, ep EvalParams, problems .
sb := &strings.Builder{}
ep.Trace = sb
ep.Txn.Lsig.Logic = ops.Program
- cost, err := Check(ops.Program, ep)
+ err := Check(ops.Program, ep)
if err != nil {
t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err := Eval(ops.Program, ep)
if len(problems) == 0 {
@@ -1980,9 +1999,8 @@ int 0x300
int 0x310
==`, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -2024,9 +2042,8 @@ len
int 0
==
&&`, 2)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -2077,9 +2094,8 @@ concat
dup
concat
len`, 2)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2104,9 +2120,8 @@ int 4
int 2
substring3
len`, 2)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2123,9 +2138,8 @@ int 4
int 0xFFFFFFFFFFFFFFFE
substring3
len`, 2)
- cost, err = Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
pass, err = Eval(ops.Program, defaultEvalParams(nil, nil))
require.False(t, pass)
require.Error(t, err)
@@ -2137,9 +2151,8 @@ func TestSubstringRange(t *testing.T) {
ops := testProg(t, `byte 0xf000000000000000
substring 2 99
len`, 2)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2170,9 +2183,8 @@ load 0
load 1
+
&&`, v)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -2201,9 +2213,8 @@ int 5
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, progText, v)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -2282,9 +2293,8 @@ func TestCompares(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops, err := AssembleStringWithVersion(testCompareProgramText, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(nil, nil))
if !pass {
@@ -2313,9 +2323,8 @@ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops, err := AssembleStringWithVersion(progText, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(nil, nil))
if !pass {
@@ -2348,9 +2357,8 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops, err := AssembleStringWithVersion(progText, v)
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
@@ -2363,6 +2371,53 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A
}
}
+func TestSlowLogic(t *testing.T) {
+ t.Parallel()
+ fragment := `byte 0x666E6F7264; keccak256
+ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567; ==;`
+
+ // Sanity check. Running a short sequence of these fragments passes in all versions.
+ source := fragment + strings.Repeat(fragment+"&&;", 5)
+ testAccepts(t, source, 1)
+
+ // in v1, each repeat costs 30
+ v1overspend := fragment + strings.Repeat(fragment+"&&;", 20000/30)
+ // in v2,v3 each repeat costs 134
+ v2overspend := fragment + strings.Repeat(fragment+"&&;", 20000/134)
+
+ // v1overspend fails (on v1)
+ ops := testProg(t, v1overspend, 1)
+ err := Check(ops.Program, defaultEvalParamsWithVersion(nil, nil, 1))
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "static cost")
+ // v2overspend passes Check, even on v2 proto, because cost is "grandfathered"
+ ops = testProg(t, v2overspend, 1)
+ err = Check(ops.Program, defaultEvalParamsWithVersion(nil, nil, 2))
+ require.NoError(t, err)
+
+ // even the shorter, v2overspend, fails when compiled as v2 code
+ ops = testProg(t, v2overspend, 2)
+ err = Check(ops.Program, defaultEvalParamsWithVersion(nil, nil, 2))
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "static cost")
+
+ // in v4 cost is still 134, but only matters in Eval, not Check, so both fail there
+ ep4 := defaultEvalParamsWithVersion(nil, nil, 4)
+ ops = testProg(t, v1overspend, 4)
+ err = Check(ops.Program, ep4)
+ require.NoError(t, err)
+ _, err = Eval(ops.Program, ep4)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "dynamic cost")
+
+ ops = testProg(t, v2overspend, 4)
+ err = Check(ops.Program, ep4)
+ require.NoError(t, err)
+ _, err = Eval(ops.Program, ep4)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "dynamic cost")
+}
+
func isNotPanic(t *testing.T, err error) {
if err == nil {
return
@@ -2379,9 +2434,8 @@ func TestStackUnderflow(t *testing.T) {
ops, err := AssembleStringWithVersion(`int 1`, v)
ops.Program = append(ops.Program, 0x08) // +
require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2401,9 +2455,8 @@ func TestWrongStackTypeRuntime(t *testing.T) {
ops, err := AssembleStringWithVersion(`int 1`, v)
require.NoError(t, err)
ops.Program = append(ops.Program, 0x01, 0x15) // sha256, len
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2424,9 +2477,8 @@ func TestEqMismatch(t *testing.T) {
int 1`, v)
require.NoError(t, err)
ops.Program = append(ops.Program, 0x12) // ==
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2447,9 +2499,8 @@ func TestNeqMismatch(t *testing.T) {
int 1`, v)
require.NoError(t, err)
ops.Program = append(ops.Program, 0x13) // !=
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err) // TODO: Check should know the type stack was wrong
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2470,9 +2521,8 @@ func TestWrongStackTypeRuntime2(t *testing.T) {
int 1`, v)
require.NoError(t, err)
ops.Program = append(ops.Program, 0x08) // +
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, _ := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2497,9 +2547,8 @@ func TestIllegalOp(t *testing.T) {
break
}
}
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2524,9 +2573,8 @@ int 1
require.NoError(t, err)
// cut two last bytes - intc_1 and last byte of bnz
ops.Program = ops.Program[:len(ops.Program)-2]
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
@@ -2546,9 +2594,8 @@ intc 0
intc 0
bnz done
done:`, 2)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
require.NoError(t, err)
@@ -2564,10 +2611,9 @@ func TestShortBytecblock(t *testing.T) {
for i := 2; i < len(fullops.Program); i++ {
program := fullops.Program[:i]
t.Run(hex.EncodeToString(program), func(t *testing.T) {
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ err := Check(program, defaultEvalParams(nil, nil))
require.Error(t, err)
isNotPanic(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(program, defaultEvalParams(&sb, nil))
if pass {
@@ -2594,10 +2640,9 @@ func TestShortBytecblock2(t *testing.T) {
t.Run(src, func(t *testing.T) {
program, err := hex.DecodeString(src)
require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ err = Check(program, defaultEvalParams(nil, nil))
require.Error(t, err)
isNotPanic(t, err)
- require.True(t, cost < 1000)
sb := strings.Builder{}
pass, err := Eval(program, defaultEvalParams(&sb, nil))
if pass {
@@ -2615,7 +2660,7 @@ const panicString = "out of memory, buffer overrun, stack overflow, divide by ze
func opPanic(cx *evalContext) {
panic(panicString)
}
-func checkPanic(cx *evalContext) int {
+func checkPanic(cx *evalContext) error {
panic(panicString)
}
@@ -2641,7 +2686,7 @@ func TestPanic(t *testing.T) {
sb := strings.Builder{}
params := defaultEvalParams(&sb, nil)
params.Logger = log
- _, err = Check(ops.Program, params)
+ err = Check(ops.Program, params)
require.Error(t, err)
if pe, ok := err.(PanicError); ok {
require.Equal(t, panicString, pe.PanicValue)
@@ -2677,7 +2722,7 @@ func TestProgramTooNew(t *testing.T) {
t.Parallel()
var program [12]byte
vlen := binary.PutUvarint(program[:], EvalMaxVersion+1)
- _, err := Check(program[:vlen], defaultEvalParams(nil, nil))
+ err := Check(program[:vlen], defaultEvalParams(nil, nil))
require.Error(t, err)
isNotPanic(t, err)
pass, err := Eval(program[:vlen], defaultEvalParams(nil, nil))
@@ -2690,7 +2735,7 @@ func TestInvalidVersion(t *testing.T) {
t.Parallel()
program, err := hex.DecodeString("ffffffffffffffffffffffff")
require.NoError(t, err)
- _, err = Check(program, defaultEvalParams(nil, nil))
+ err = Check(program, defaultEvalParams(nil, nil))
require.Error(t, err)
isNotPanic(t, err)
pass, err := Eval(program, defaultEvalParams(nil, nil))
@@ -2708,7 +2753,7 @@ func TestProgramProtoForbidden(t *testing.T) {
}
ep := EvalParams{}
ep.Proto = &proto
- _, err := Check(program[:vlen], ep)
+ err := Check(program[:vlen], ep)
require.Error(t, err)
ep.Txn = &transactions.SignedTxn{}
pass, err := Eval(program[:vlen], ep)
@@ -2733,13 +2778,29 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 3 // clobber the branch offset to be in the middle of the bytecblock
- _, err = Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
- require.True(t, strings.Contains(err.Error(), "aligned"))
+ require.Contains(t, err.Error(), "aligned")
pass, err := Eval(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
require.False(t, pass)
isNotPanic(t, err)
+
+ // back branches are checked differently, so test misaligned back branch
+ ops.Program[6] = 0xff // Clobber the two bytes of offset with 0xff 0xff = -1
+ ops.Program[7] = 0xff // That jumps into the offset itself (pc + 3 -1)
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
+ require.Error(t, err)
+ if v < backBranchEnabledVersion {
+ require.Contains(t, err.Error(), "negative branch")
+ } else {
+ require.Contains(t, err.Error(), "back branch")
+ require.Contains(t, err.Error(), "aligned")
+ }
+ pass, err = Eval(ops.Program, defaultEvalParams(nil, nil))
+ require.Error(t, err)
+ require.False(t, pass)
+ isNotPanic(t, err)
})
}
}
@@ -2760,7 +2821,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 200 // clobber the branch offset to be beyond the end of the program
- _, err = Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
require.True(t, strings.Contains(err.Error(), "beyond end of program"))
pass, err := Eval(ops.Program, defaultEvalParams(nil, nil))
@@ -2782,17 +2843,18 @@ done:
int 1`, v)
require.NoError(t, err)
//t.Log(hex.EncodeToString(ops.Program))
+ // (br)anch byte, (hi)gh byte of offset, (lo)w byte: brhilo
canonicalProgramString := mutateProgVersion(v, "01200101224000112603040123457604ababcdcd04f000baad22")
canonicalProgramBytes, err := hex.DecodeString(canonicalProgramString)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
- ops.Program[6] = 0xff // clobber the branch offset
- _, err = Check(ops.Program, defaultEvalParams(nil, nil))
+ ops.Program[6] = 0x70 // clobber hi byte of branch offset
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
- require.Contains(t, err.Error(), "too large")
+ require.Contains(t, err.Error(), "beyond")
pass, err := Eval(ops.Program, defaultEvalParams(nil, nil))
require.Error(t, err)
- require.Contains(t, err.Error(), "too large")
+ require.Contains(t, err.Error(), "beyond")
require.False(t, pass)
isNotPanic(t, err)
})
@@ -2812,14 +2874,14 @@ int 1
source := fmt.Sprintf(template, line)
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- ops.Program[7] = 0xff // clobber the branch offset
+ ops.Program[7] = 0xf0 // clobber the branch offset - highly negative
ops.Program[8] = 0xff // clobber the branch offset
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.Error(t, err)
- require.Contains(t, err.Error(), "too large")
+ require.Contains(t, err.Error(), "beyond")
pass, err := Eval(ops.Program, ep)
require.Error(t, err)
- require.Contains(t, err.Error(), "too large")
+ require.Contains(t, err.Error(), "beyond")
require.False(t, pass)
})
}
@@ -3104,9 +3166,8 @@ int 142791994204213819
func benchmarkBasicProgram(b *testing.B, source string) {
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(b, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(b, err)
- require.True(b, cost < 2000)
//b.Logf("%d bytes of program", len(ops.Program))
//b.Log(hex.EncodeToString(ops.Program))
b.ResetTimer()
@@ -3129,9 +3190,9 @@ func benchmarkBasicProgram(b *testing.B, source string) {
func benchmarkExpensiveProgram(b *testing.B, source string) {
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(b, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(b, err)
- require.True(b, cost > 1000)
+ ep := defaultEvalParams(nil, nil)
+ err = Check(ops.Program, ep)
+ require.Error(b, err) // excessive cost
//b.Logf("%d bytes of program", len(ops.Program))
//b.Log(hex.EncodeToString(ops.Program))
b.ResetTimer()
@@ -3330,7 +3391,7 @@ func BenchmarkCheckx5(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, program := range programs {
- _, err = Check(program.Program, defaultEvalParams(nil, nil))
+ err = Check(program.Program, defaultEvalParams(nil, nil))
if err != nil {
require.NoError(b, err)
}
@@ -3536,7 +3597,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
ops, err := AssembleStringWithVersion(source, v)
require.NoError(t, err)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", appsEnabledVersion))
@@ -3548,7 +3609,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
ops, err := AssembleStringWithVersion(source, appsEnabledVersion)
require.NoError(t, err)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
_, err = EvalStateful(ops.Program, ep)
@@ -3608,7 +3669,7 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
ops, err := AssembleStringWithVersion(source, v)
require.NoError(t, err)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), expected)
@@ -3616,7 +3677,7 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
require.Error(t, err)
require.Contains(t, err.Error(), expected)
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), expected)
@@ -3630,13 +3691,13 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
ops, err := AssembleStringWithVersion(source, v)
require.NoError(t, err)
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
_, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.NoError(t, err)
_, err = Eval(ops.Program, ep)
@@ -3671,8 +3732,8 @@ func TestAllowedOpcodesV2(t *testing.T) {
"app_global_put": "byte 0x41; dup; app_global_put",
"app_local_del": "int 0; byte 0x41; app_local_del",
"app_global_del": "byte 0x41; app_global_del",
- "asset_holding_get": "asset_holding_get AssetBalance",
- "asset_params_get": "asset_params_get AssetTotal",
+ "asset_holding_get": "int 1; int 1; asset_holding_get AssetBalance",
+ "asset_params_get": "int 1; asset_params_get AssetTotal",
}
excluded := map[string]bool{
@@ -3693,7 +3754,7 @@ func TestAllowedOpcodesV2(t *testing.T) {
require.Contains(t, source, spec.Name)
ops := testProg(t, source, AssemblerMaxVersion)
// all opcodes allowed in stateful mode so use CheckStateful/EvalStateful
- _, err := CheckStateful(ops.Program, ep)
+ err := CheckStateful(ops.Program, ep)
require.NoError(t, err, source)
_, err = EvalStateful(ops.Program, ep)
if spec.Name != "return" {
@@ -3704,10 +3765,10 @@ func TestAllowedOpcodesV2(t *testing.T) {
for v := byte(0); v <= 1; v++ {
ops.Program[0] = v
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.Error(t, err, source)
require.Contains(t, err.Error(), "illegal opcode")
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.Error(t, err, source)
require.Contains(t, err.Error(), "illegal opcode")
_, err = Eval(ops.Program, ep)
@@ -3756,7 +3817,7 @@ func TestAllowedOpcodesV3(t *testing.T) {
require.Contains(t, source, spec.Name)
ops := testProg(t, source, AssemblerMaxVersion)
// all opcodes allowed in stateful mode so use CheckStateful/EvalStateful
- _, err := CheckStateful(ops.Program, ep)
+ err := CheckStateful(ops.Program, ep)
require.NoError(t, err, source)
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err, source)
@@ -3764,10 +3825,10 @@ func TestAllowedOpcodesV3(t *testing.T) {
for v := byte(0); v <= 1; v++ {
ops.Program[0] = v
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.Error(t, err, source)
require.Contains(t, err.Error(), "illegal opcode")
- _, err = CheckStateful(ops.Program, ep)
+ err = CheckStateful(ops.Program, ep)
require.Error(t, err, source)
require.Contains(t, err.Error(), "illegal opcode")
_, err = Eval(ops.Program, ep)
@@ -3797,7 +3858,7 @@ func TestRekeyFailsOnOldVersion(t *testing.T) {
ep := defaultEvalParams(&sb, &txn)
ep.TxnGroup = []transactions.SignedTxn{txn}
ep.Proto = &proto
- _, err = Check(ops.Program, ep)
+ err = Check(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", rekeyingEnabledVersion))
pass, err := Eval(ops.Program, ep)
@@ -3825,24 +3886,36 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
if v < introduced {
- testProg(t, obfuscate(program), v, expect{0, "...opcode was introduced..."})
+ testProg(t, obfuscate(program), v, expect{0, "...was introduced..."})
return
}
ops := testProg(t, program, v)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- var txn transactions.SignedTxn
- txn.Lsig.Logic = ops.Program
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, &txn))
- ok := tester(pass, err)
- if !ok {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
+ // Programs created with a previous assembler
+ // should still operate properly with future
+ // EvalParams, so try all forward versions.
+ for lv := v; lv <= AssemblerMaxVersion; lv++ {
+ t.Run(fmt.Sprintf("lv=%d", lv), func(t *testing.T) {
+ sb := strings.Builder{}
+ err := Check(ops.Program, defaultEvalParamsWithVersion(&sb, nil, lv))
+ if err != nil {
+ t.Log(hex.EncodeToString(ops.Program))
+ t.Log(sb.String())
+ }
+ require.NoError(t, err)
+ var txn transactions.SignedTxn
+ txn.Lsig.Logic = ops.Program
+ sb = strings.Builder{}
+ pass, err := Eval(ops.Program, defaultEvalParamsWithVersion(&sb, &txn, lv))
+ ok := tester(pass, err)
+ if !ok {
+ t.Log(hex.EncodeToString(ops.Program))
+ t.Log(sb.String())
+ t.Log(err)
+ }
+ require.True(t, ok)
+ isNotPanic(t, err) // Never want a Go level panic.
+ })
}
- require.True(t, ok)
- isNotPanic(t, err) // Never want a Go level panic.
})
}
}
@@ -3978,3 +4051,97 @@ func TestPush(t *testing.T) {
ops2 = testProg(t, "int 2; int 3; int 5; int 6; pushint 1", 3)
require.Less(t, len(ops2.Program), len(ops1.Program))
}
+
+func TestLoop(t *testing.T) {
+ t.Parallel()
+ // Double until > 10. Should be 16
+ testAccepts(t, "int 1; loop: int 2; *; dup; int 10; <; bnz loop; int 16; ==", 4)
+
+ // Why does this label on line with instruction cause trouble?
+ testAccepts(t, "int 1; loop: int 2; *; dup; int 10; <; bnz loop; int 16; ==", 4)
+
+ // Infinite loop because multiply by one instead of two
+ testPanics(t, "int 1; loop:; int 1; *; dup; int 10; <; bnz loop; int 16; ==", 4)
+}
+
+func TestSubroutine(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "int 1; callsub double; int 2; ==; return; double: dup; +; retsub;", 4)
+ testAccepts(t, `
+b main;
+fact:
+ dup
+ int 2
+ <
+ bz recur
+ retsub
+recur:
+ dup
+ int 1
+ -
+ callsub fact
+ *
+ retsub
+
+main:
+ int 5
+ callsub fact
+ int 120
+ ==
+`, 4)
+
+ // Mutually recursive odd/even. Each is intentionally done in a slightly different way.
+ testAccepts(t, `
+b main
+odd: // If 0, return false, else return !even
+ dup
+ bz retfalse
+ callsub even
+ !
+ retsub
+
+retfalse:
+ pop
+ int 0
+ retsub
+
+
+even: // If 0, return true, else decrement and return even
+ dup
+ bz rettrue
+ int 1
+ -
+ callsub odd
+ retsub
+
+rettrue:
+ pop
+ int 1
+ retsub
+
+
+main:
+ int 1
+ callsub odd
+ assert
+
+ int 0
+ callsub even
+ assert
+
+ int 10
+ callsub even
+ assert
+
+ int 10
+ callsub odd
+ !
+ assert
+
+ int 1
+`, 4)
+
+ testPanics(t, "int 1; retsub", 4)
+
+ testPanics(t, "int 1; recur: callsub recur; int 1", 4)
+}
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index eb3b956f1..41aecb4b9 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -518,7 +518,7 @@ func init() {
txnTypeConstToUint64 = make(map[string]uint64, len(TxnTypeNames))
for tt, v := range txnTypeIndexes {
- symbol := TypeNameDescription(tt)
+ symbol := TypeNameDescriptions[tt]
txnTypeConstToUint64[symbol] = v
}
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 8748b4225..c9822a202 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -21,7 +21,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 3
+const LogicVersion = 4
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -33,6 +33,10 @@ const rekeyingEnabledVersion = 2
// from being used with applications. Do not edit!
const appsEnabledVersion = 2
+// backBranchEnabledVersion is the version of TEAL where branches could
+// go back (and cost accounting was done during execution)
+const backBranchEnabledVersion = 4
+
// opDetails records details such as non-standard costs, immediate
// arguments, or dynamic layout controlled by a check function.
type opDetails struct {
@@ -51,7 +55,7 @@ func costly(cost int) opDetails {
func immediates(name string, rest ...string) opDetails {
num := 1 + len(rest)
- immediates := make([]immediate, num, len(rest)+1)
+ immediates := make([]immediate, num)
immediates[0] = immediate{name, immByte}
for i, n := range rest {
immediates[i+1] = immediate{n, immByte}
@@ -111,7 +115,8 @@ var anyIntInt = StackTypes{StackAny, StackUint64, StackUint64}
//
// Any changes should be reflected in README_in.md which serves as the language spec.
//
-// WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha()
+// Note: assembly can specialize an Any return type if known at
+// assembly-time, with ops.returns()
var OpSpecs = []OpSpec{
{0x00, "err", opErr, asmDefault, disDefault, nil, nil, 1, modeAny, opDefault},
{0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(7)},
@@ -150,19 +155,20 @@ var OpSpecs = []OpSpec{
{0x1c, "~", opBitNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault},
{0x1d, "mulw", opMulw, asmDefault, disDefault, twoInts, twoInts, 1, modeAny, opDefault},
{0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opDefault},
+ {0x1f, "divw", opDivw, asmDefault, disDefault, twoInts.plus(twoInts), twoInts.plus(twoInts), 4, modeAny, opDefault},
{0x20, "intcblock", opIntConstBlock, assembleIntCBlock, disIntcblock, nil, nil, 1, modeAny, varies(checkIntConstBlock, "uint ...", immInts)},
- {0x21, "intc", opIntConstLoad, assembleIntC, disDefault, nil, oneInt, 1, modeAny, immediates("i")},
- {0x22, "intc_0", opIntConst0, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
- {0x23, "intc_1", opIntConst1, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
- {0x24, "intc_2", opIntConst2, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
- {0x25, "intc_3", opIntConst3, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x21, "intc", opIntConstLoad, assembleIntC, disIntc, nil, oneInt, 1, modeAny, immediates("i")},
+ {0x22, "intc_0", opIntConst0, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
+ {0x23, "intc_1", opIntConst1, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
+ {0x24, "intc_2", opIntConst2, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
+ {0x25, "intc_3", opIntConst3, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
{0x26, "bytecblock", opByteConstBlock, assembleByteCBlock, disBytecblock, nil, nil, 1, modeAny, varies(checkByteConstBlock, "bytes ...", immBytess)},
- {0x27, "bytec", opByteConstLoad, assembleByteC, disDefault, nil, oneBytes, 1, modeAny, immediates("i")},
- {0x28, "bytec_0", opByteConst0, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
- {0x29, "bytec_1", opByteConst1, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
- {0x2a, "bytec_2", opByteConst2, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
- {0x2b, "bytec_3", opByteConst3, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x27, "bytec", opByteConstLoad, assembleByteC, disBytec, nil, oneBytes, 1, modeAny, immediates("i")},
+ {0x28, "bytec_0", opByteConst0, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
+ {0x29, "bytec_1", opByteConst1, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2a, "bytec_2", opByteConst2, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2b, "bytec_3", opByteConst3, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
{0x2c, "arg", opArg, assembleArg, disDefault, nil, oneBytes, 1, runModeSignature, immediates("n")},
{0x2d, "arg_0", opArg0, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
{0x2e, "arg_1", opArg1, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
@@ -225,6 +231,16 @@ var OpSpecs = []OpSpec{
// Immediate bytes and ints. Smaller code size for single use of constant.
{0x80, "pushbytes", opPushBytes, asmPushBytes, disPushBytes, nil, oneBytes, 3, modeAny, varies(checkPushBytes, "bytes", immBytes)},
{0x81, "pushint", opPushInt, asmPushInt, disPushInt, nil, oneInt, 3, modeAny, varies(checkPushInt, "uint", immInt)},
+
+ // "Function oriented"
+ {0x88, "callsub", opCallSub, assembleBranch, disBranch, nil, nil, 4, modeAny, opBranch},
+ {0x89, "retsub", opRetSub, asmDefault, disDefault, nil, nil, 4, modeAny, opDefault},
+ // Leave a little room for indirect function calls, or similar
+
+ // More math
+ // shl, shr
+ // divw, modw convenience
+ // expmod
}
type sortByOpcode []OpSpec
diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go
index 7c61f9068..36dca364a 100644
--- a/data/transactions/logic/opcodes_test.go
+++ b/data/transactions/logic/opcodes_test.go
@@ -117,8 +117,8 @@ func TestOpcodesByVersion(t *testing.T) {
func TestOpcodesVersioningV2(t *testing.T) {
t.Parallel()
- require.Equal(t, 4, len(opsByOpcode))
- require.Equal(t, 4, len(OpsByName))
+ require.Equal(t, LogicVersion+1, len(opsByOpcode))
+ require.Equal(t, LogicVersion+1, len(OpsByName))
// ensure v0 has only v0 opcodes
cntv0 := 0
diff --git a/data/transactions/signedtxn.go b/data/transactions/signedtxn.go
index 7a20c3d8a..8b54300d9 100644
--- a/data/transactions/signedtxn.go
+++ b/data/transactions/signedtxn.go
@@ -119,3 +119,13 @@ func (s *SignedTxnInBlock) Hash() crypto.Digest {
defer protocol.PutEncodingBuf(enc)
return crypto.Hash(enc)
}
+
+// WrapSignedTxnsWithAD takes an array SignedTxn and returns the same as SignedTxnWithAD
+// Each txn's ApplyData is the default empty state.
+func WrapSignedTxnsWithAD(txgroup []SignedTxn) []SignedTxnWithAD {
+ txgroupad := make([]SignedTxnWithAD, len(txgroup))
+ for i, tx := range txgroup {
+ txgroupad[i].SignedTxn = tx
+ }
+ return txgroupad
+}
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 4c30d1c3b..34f52eed6 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -17,6 +17,7 @@
package transactions
import (
+ "errors"
"fmt"
"github.com/algorand/go-algorand/config"
@@ -242,6 +243,14 @@ func (tx Transaction) MatchAddress(addr basics.Address, spec SpecialAddresses) b
return false
}
+var errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound = errors.New("transaction first voting round need to be less than its last voting round")
+var errKeyregTxnNonCoherentVotingKeys = errors.New("the following transaction fields need to be clear/set together : votekey, selkey, votekd")
+var errKeyregTxnOfflineTransactionHasVotingRounds = errors.New("on going offline key registration transaction, the vote first and vote last fields should not be set")
+var errKeyregTxnUnsupportedSwitchToNonParticipating = errors.New("transaction tries to mark an account as nonparticipating, but that transaction is not supported")
+var errKeyregTxnGoingOnlineWithNonParticipating = errors.New("transaction tries to register keys to go online, but nonparticipatory flag is set")
+var errKeyregTxnGoingOnlineWithZeroVoteLast = errors.New("transaction tries to register keys to go online, but vote last is set to zero")
+var errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid = errors.New("transaction tries to register keys to go online, but first voting round is beyond the round after last valid round")
+
// WellFormed checks that the transaction looks reasonable on its own (but not necessarily valid against the actual ledger). It does not check signatures.
func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusParams) error {
switch tx.Type {
@@ -253,18 +262,48 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
}
case protocol.KeyRegistrationTx:
+ if proto.EnableKeyregCoherencyCheck {
+ // ensure that the VoteLast is greater or equal to the VoteFirst
+ if tx.KeyregTxnFields.VoteFirst > tx.KeyregTxnFields.VoteLast {
+ return errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound
+ }
+
+ // The trio of [VotePK, SelectionPK, VoteKeyDilution] needs to be all zeros or all non-zero for the transaction to be valid.
+ if !((tx.KeyregTxnFields.VotePK == crypto.OneTimeSignatureVerifier{} && tx.KeyregTxnFields.SelectionPK == crypto.VRFVerifier{} && tx.KeyregTxnFields.VoteKeyDilution == 0) ||
+ (tx.KeyregTxnFields.VotePK != crypto.OneTimeSignatureVerifier{} && tx.KeyregTxnFields.SelectionPK != crypto.VRFVerifier{} && tx.KeyregTxnFields.VoteKeyDilution != 0)) {
+ return errKeyregTxnNonCoherentVotingKeys
+ }
+
+ // if it's a going offline transaction
+ if tx.KeyregTxnFields.VoteKeyDilution == 0 {
+ // check that we don't have any VoteFirst/VoteLast fields.
+ if tx.KeyregTxnFields.VoteFirst != 0 || tx.KeyregTxnFields.VoteLast != 0 {
+ return errKeyregTxnOfflineTransactionHasVotingRounds
+ }
+ } else {
+ // going online
+ if tx.KeyregTxnFields.VoteLast == 0 {
+ return errKeyregTxnGoingOnlineWithZeroVoteLast
+ }
+ if tx.KeyregTxnFields.VoteFirst > tx.LastValid+1 {
+ return errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid
+ }
+ }
+ }
+
// check that, if this tx is marking an account nonparticipating,
// it supplies no key (as though it were trying to go offline)
if tx.KeyregTxnFields.Nonparticipation {
if !proto.SupportBecomeNonParticipatingTransactions {
// if the transaction has the Nonparticipation flag high, but the protocol does not support
// that type of transaction, it is invalid.
- return fmt.Errorf("transaction tries to mark an account as nonparticipating, but that transaction is not supported")
+ return errKeyregTxnUnsupportedSwitchToNonParticipating
}
suppliesNullKeys := tx.KeyregTxnFields.VotePK == crypto.OneTimeSignatureVerifier{} || tx.KeyregTxnFields.SelectionPK == crypto.VRFVerifier{}
if !suppliesNullKeys {
- return fmt.Errorf("transaction tries to register keys to go online, but nonparticipatory flag is set")
+ return errKeyregTxnGoingOnlineWithNonParticipating
}
+
}
case protocol.AssetConfigTx:
diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go
index cb9853542..6ccd83597 100644
--- a/data/transactions/transaction_test.go
+++ b/data/transactions/transaction_test.go
@@ -17,6 +17,7 @@
package transactions
import (
+ "flag"
"fmt"
"testing"
@@ -164,3 +165,647 @@ func TestWellFormedErrors(t *testing.T) {
require.Equal(t, usecase.expectedError, err)
}
}
+
+var generateFlag = flag.Bool("generate", false, "")
+
+// running test with -generate would generate the matrix used in the test ( without the "correct" errors )
+func TestWellFormedKeyRegistrationTx(t *testing.T) {
+ flag.Parse()
+
+ // addr has no significance here other than being a normal valid address
+ addr, err := basics.UnmarshalChecksumAddress("NDQCJNNY5WWWFLP4GFZ7MEF2QJSMZYK6OWIV2AQ7OMAVLEFCGGRHFPKJJA")
+ require.NoError(t, err)
+
+ tx := generateDummyGoNonparticpatingTransaction(addr)
+ curProto := config.Consensus[protocol.ConsensusCurrentVersion]
+ feeSink := basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
+ spec := SpecialAddresses{FeeSink: feeSink}
+ if !curProto.SupportBecomeNonParticipatingTransactions {
+ t.Skipf("Skipping rest of test because current protocol version %v does not support become-nonparticipating transactions", protocol.ConsensusCurrentVersion)
+ }
+
+ // this tx is well-formed
+ err = tx.WellFormed(spec, curProto)
+ require.NoError(t, err)
+
+ type keyRegTestCase struct {
+ votePK crypto.OneTimeSignatureVerifier
+ selectionPK crypto.VRFVerifier
+ voteFirst basics.Round
+ voteLast basics.Round
+ lastValid basics.Round
+ voteKeyDilution uint64
+ nonParticipation bool
+ supportBecomeNonParticipatingTransactions bool
+ enableKeyregCoherencyCheck bool
+ err error
+ }
+ votePKValue := crypto.OneTimeSignatureVerifier{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
+ selectionPKValue := crypto.VRFVerifier{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
+
+ runTestCase := func(testCase keyRegTestCase) error {
+ tx.KeyregTxnFields.VotePK = testCase.votePK
+ tx.KeyregTxnFields.SelectionPK = testCase.selectionPK
+ tx.KeyregTxnFields.VoteFirst = testCase.voteFirst
+ tx.KeyregTxnFields.VoteLast = testCase.voteLast
+ tx.KeyregTxnFields.VoteKeyDilution = testCase.voteKeyDilution
+ tx.KeyregTxnFields.Nonparticipation = testCase.nonParticipation
+ tx.LastValid = testCase.lastValid
+
+ curProto.SupportBecomeNonParticipatingTransactions = testCase.supportBecomeNonParticipatingTransactions
+ curProto.EnableKeyregCoherencyCheck = testCase.enableKeyregCoherencyCheck
+ return tx.WellFormed(spec, curProto)
+ }
+
+ if *generateFlag == true {
+ fmt.Printf("keyRegTestCases := []keyRegTestCase{\n")
+ idx := 0
+ for _, votePK := range []crypto.OneTimeSignatureVerifier{crypto.OneTimeSignatureVerifier{}, votePKValue} {
+ for _, selectionPK := range []crypto.VRFVerifier{crypto.VRFVerifier{}, selectionPKValue} {
+ for _, voteFirst := range []basics.Round{basics.Round(0), basics.Round(5)} {
+ for _, voteLast := range []basics.Round{basics.Round(0), basics.Round(10)} {
+ for _, lastValid := range []basics.Round{basics.Round(4), basics.Round(3)} {
+ for _, voteKeyDilution := range []uint64{0, 10000} {
+ for _, nonParticipation := range []bool{false, true} {
+ for _, supportBecomeNonParticipatingTransactions := range []bool{false, true} {
+ for _, enableKeyregCoherencyCheck := range []bool{false, true} {
+ outcome := runTestCase(keyRegTestCase{
+ votePK,
+ selectionPK,
+ voteFirst,
+ voteLast,
+ lastValid,
+ voteKeyDilution,
+ nonParticipation,
+ supportBecomeNonParticipatingTransactions,
+ enableKeyregCoherencyCheck,
+ nil})
+ errStr := "nil"
+ switch outcome {
+ case errKeyregTxnUnsupportedSwitchToNonParticipating:
+ errStr = "errKeyregTxnUnsupportedSwitchToNonParticipating"
+ case errKeyregTxnGoingOnlineWithNonParticipating:
+ errStr = "errKeyregTxnGoingOnlineWithNonParticipating"
+ case errKeyregTxnNonCoherentVotingKeys:
+ errStr = "errKeyregTxnNonCoherentVotingKeys"
+ case errKeyregTxnOfflineTransactionHasVotingRounds:
+ errStr = "errKeyregTxnOfflineTransactionHasVotingRounds"
+ case errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound:
+ errStr = "errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound"
+ case errKeyregTxnGoingOnlineWithZeroVoteLast:
+ errStr = "errKeyregTxnGoingOnlineWithZeroVoteLast"
+ case errKeyregTxnGoingOnlineWithNonParticipating:
+ errStr = "errKeyregTxnGoingOnlineWithNonParticipating"
+ case errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid:
+ errStr = "errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid"
+ default:
+ require.Nil(t, outcome)
+
+ }
+ s := "/* %3d */ keyRegTestCase{votePK:"
+ if votePK == votePKValue {
+ s += "votePKValue"
+ } else {
+ s += "crypto.OneTimeSignatureVerifier{}"
+ }
+ s += ", selectionPK:"
+ if selectionPK == selectionPKValue {
+ s += "selectionPKValue"
+ } else {
+ s += "crypto.VRFVerifier{}"
+ }
+ s = fmt.Sprintf("%s, voteFirst:basics.Round(%2d), voteLast:basics.Round(%2d), lastValid:basics.Round(%2d), voteKeyDilution: %5d, nonParticipation: %v,supportBecomeNonParticipatingTransactions:%v, enableKeyregCoherencyCheck:%v, err:%s},\n",
+ s, voteFirst, voteLast, lastValid, voteKeyDilution, nonParticipation, supportBecomeNonParticipatingTransactions, enableKeyregCoherencyCheck, errStr)
+ fmt.Printf(s, idx)
+ idx++
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ fmt.Printf("}\n")
+ return
+ }
+ keyRegTestCases := []keyRegTestCase{
+ /* 0 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 1 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil},
+ /* 2 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 3 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 4 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 5 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 6 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 7 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 8 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 9 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 10 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 11 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 12 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 13 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 14 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 15 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 16 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 17 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil},
+ /* 18 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 19 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 20 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 21 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 22 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 23 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 24 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 25 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 26 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 27 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 28 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 29 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 30 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 31 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 32 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 33 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 34 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 35 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 36 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 37 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 38 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 39 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 40 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 41 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 42 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 43 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 44 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 45 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 46 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 47 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 48 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 49 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 50 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 51 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 52 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 53 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 54 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 55 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 56 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 57 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 58 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 59 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 60 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 61 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 62 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 63 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 64 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 65 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 66 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 67 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 68 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 69 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 70 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 71 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 72 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 73 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 74 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 75 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 76 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 77 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 78 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 79 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 80 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 81 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 82 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 83 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 84 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 85 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 86 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 87 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 88 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 89 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 90 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 91 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 92 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 93 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 94 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 95 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 96 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 97 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 98 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 99 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 100 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 101 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 102 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 103 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 104 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 105 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 106 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 107 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 108 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 109 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 110 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 111 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 112 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 113 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 114 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 115 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 116 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 117 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 118 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 119 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds},
+ /* 120 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 121 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 122 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 123 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 124 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 125 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 126 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 127 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 128 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 129 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 130 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 131 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 132 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 133 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 134 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 135 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 136 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 137 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 138 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 139 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 140 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 141 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 142 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 143 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 144 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 145 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 146 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 147 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 148 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 149 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 150 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 151 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 152 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 153 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 154 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 155 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 156 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 157 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 158 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 159 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 160 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 161 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 162 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 163 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 164 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 165 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 166 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 167 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 168 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 169 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 170 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 171 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 172 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 173 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 174 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 175 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 176 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 177 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 178 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 179 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 180 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 181 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 182 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 183 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 184 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 185 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 186 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 187 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 188 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 189 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 190 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 191 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 192 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 193 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 194 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 195 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 196 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 197 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 198 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 199 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 200 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 201 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 202 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 203 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 204 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 205 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 206 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 207 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 208 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 209 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 210 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 211 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 212 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 213 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 214 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 215 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 216 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 217 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 218 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 219 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 220 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 221 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 222 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 223 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 224 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 225 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 226 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 227 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 228 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 229 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 230 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 231 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 232 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 233 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 234 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 235 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 236 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 237 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 238 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 239 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 240 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 241 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 242 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 243 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 244 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 245 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 246 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 247 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 248 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 249 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 250 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 251 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 252 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 253 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 254 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 255 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 256 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 257 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 258 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 259 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 260 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 261 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 262 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 263 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 264 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 265 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 266 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 267 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 268 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 269 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 270 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 271 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 272 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 273 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 274 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 275 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 276 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 277 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 278 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 279 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 280 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 281 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 282 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 283 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 284 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 285 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 286 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 287 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 288 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 289 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 290 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 291 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 292 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 293 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 294 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 295 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 296 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 297 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 298 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 299 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 300 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 301 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 302 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 303 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 304 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 305 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 306 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 307 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 308 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 309 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 310 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 311 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 312 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 313 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 314 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 315 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 316 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 317 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 318 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 319 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 320 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 321 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 322 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 323 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 324 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 325 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 326 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 327 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 328 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 329 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 330 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 331 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 332 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 333 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 334 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 335 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 336 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 337 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 338 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 339 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 340 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 341 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 342 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 343 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 344 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 345 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 346 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 347 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 348 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 349 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 350 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 351 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 352 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 353 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 354 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 355 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 356 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 357 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 358 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 359 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 360 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 361 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 362 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 363 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 364 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 365 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 366 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 367 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 368 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 369 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 370 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 371 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 372 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 373 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 374 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 375 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 376 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 377 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 378 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 379 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 380 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 381 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 382 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 383 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 384 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 385 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 386 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 387 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 388 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 389 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 390 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 391 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 392 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 393 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 394 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 395 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 396 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 397 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 398 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 399 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 400 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 401 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 402 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 403 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 404 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 405 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 406 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 407 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 408 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 409 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 410 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 411 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 412 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 413 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 414 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 415 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast},
+ /* 416 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 417 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 418 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 419 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 420 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 421 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 422 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 423 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 424 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 425 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil},
+ /* 426 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 427 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 428 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 429 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 430 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 431 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 432 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 433 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 434 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 435 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 436 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 437 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 438 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 439 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 440 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 441 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil},
+ /* 442 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 443 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 444 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 445 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 446 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 447 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 448 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 449 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 450 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 451 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 452 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 453 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 454 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 455 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 456 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 457 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 458 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 459 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 460 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 461 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 462 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 463 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 464 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 465 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 466 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 467 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 468 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 469 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 470 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 471 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 472 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 473 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 474 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 475 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 476 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 477 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 478 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 479 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound},
+ /* 480 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 481 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 482 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 483 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 484 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 485 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 486 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 487 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 488 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 489 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil},
+ /* 490 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 491 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil},
+ /* 492 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 493 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 494 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 495 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 496 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 497 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 498 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 499 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 500 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 501 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 502 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 503 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys},
+ /* 504 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil},
+ /* 505 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid},
+ /* 506 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil},
+ /* 507 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid},
+ /* 508 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating},
+ /* 509 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid},
+ /* 510 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
+ /* 511 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid},
+ }
+ for testcaseIdx, testCase := range keyRegTestCases {
+ err := runTestCase(testCase)
+ require.Equalf(t, testCase.err, err, "index: %d\ntest case: %#v", testcaseIdx, testCase)
+ }
+}
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 450f9ee63..ed9ccffa8 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -214,13 +214,10 @@ func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *
GroupIndex: groupIndex,
MinTealVersion: &groupCtx.minTealVersion,
}
- cost, err := logic.Check(lsig.Logic, ep)
+ err := logic.Check(lsig.Logic, ep)
if err != nil {
return err
}
- if cost > int(groupCtx.consensusParams.LogicSigMaxCost) {
- return fmt.Errorf("LogicSig.Logic too slow, %d > %d", cost, groupCtx.consensusParams.LogicSigMaxCost)
- }
hasMsig := false
numSigs := 0
diff --git a/debug/logfilter/example1.in b/debug/logfilter/example1.in
new file mode 100644
index 000000000..aba082013
--- /dev/null
+++ b/debug/logfilter/example1.in
@@ -0,0 +1,31 @@
+=== RUN TestAccountsCanSendMoney
+=== PAUSE TestAccountsCanSendMoney
+=== CONT TestAccountsCanSendMoney
+Created new rootkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet2.rootkey
+Created new rootkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet1.rootkey
+Created new partkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet2.0.3000000.partkey
+Created new partkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(57402) : Telemetry configured from '/Users/tsachi/.algorand/logging.config'
+algod(57402) : No REST API Token found. Generated token: ed061c29ef14bd560d7ca7b591ce118842d949dd42d1f473c4202ca72b7d5ff9
+algod(57402) : No Admin REST API Token found. Generated token: 9102a9b56b543456d0f11527097dd8a14a4c76a5f196168d98b7f1384404fd86
+algod(57402) : Logging to: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Primary/node.log
+algod(57402) : Deadlock detection is set to: disabled (Default state is 'disable')
+algod(57402) : Initializing the Algorand node...
+algod(57402) : Success!
+algod(57402) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(57402) : ⇨ http server started on 127.0.0.1:8080
+algod(57403) : Telemetry configured from '/Users/tsachi/.algorand/logging.config'
+algod(57403) : No REST API Token found. Generated token: e752c98368436d74c4074a1cd2b15f31ea7d7661768764018d51b42b2b53fba7
+algod(57403) : No Admin REST API Token found. Generated token: 5bcc51fabb5247bb8a839a18e7ac3f7914138aea92e19257ec13765015649483
+algod(57403) : Logging to: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Node/node.log
+algod(57403) : Deadlock detection is set to: disabled (Default state is 'disable')
+algod(57403) : Initializing the Algorand node...
+algod(57403) : Success!
+algod(57403) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:64818. Press Ctrl-C to exit
+algod(57403) : ⇨ http server started on 127.0.0.1:64818
+algod(57403) : Exiting on terminated
+algod(57402) : Exiting on terminated
+--- PASS: TestAccountsCanSendMoney (112.79s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 113.107s
diff --git a/debug/logfilter/example1.out.expected b/debug/logfilter/example1.out.expected
new file mode 100644
index 000000000..7bafe7b35
--- /dev/null
+++ b/debug/logfilter/example1.out.expected
@@ -0,0 +1,2 @@
+--- PASS: TestAccountsCanSendMoney (112.79s)
+ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 113.107s
diff --git a/debug/logfilter/example2.in b/debug/logfilter/example2.in
new file mode 100644
index 000000000..fbf835f59
--- /dev/null
+++ b/debug/logfilter/example2.in
@@ -0,0 +1,98 @@
+=== RUN TestVet
+=== PAUSE TestVet
+=== RUN TestVetAsm
+=== PAUSE TestVetAsm
+=== RUN TestVetDirs
+=== PAUSE TestVetDirs
+=== RUN TestTags
+=== PAUSE TestTags
+=== RUN TestVetVerbose
+=== PAUSE TestVetVerbose
+=== CONT TestVet
+=== CONT TestTags
+=== CONT TestVetVerbose
+=== RUN TestTags/testtag
+=== PAUSE TestTags/testtag
+=== CONT TestVetDirs
+=== CONT TestVetAsm
+=== RUN TestVet/0
+=== PAUSE TestVet/0
+=== RUN TestVet/1
+=== PAUSE TestVet/1
+=== RUN TestVet/2
+=== PAUSE TestVet/2
+=== RUN TestVet/3
+=== PAUSE TestVet/3
+=== RUN TestVet/4
+=== RUN TestTags/x_testtag_y
+=== PAUSE TestVet/4
+=== RUN TestVet/5
+=== PAUSE TestVet/5
+=== PAUSE TestTags/x_testtag_y
+=== RUN TestVet/6
+=== RUN TestTags/x,testtag,y
+=== PAUSE TestTags/x,testtag,y
+=== RUN TestVetDirs/testingpkg
+=== PAUSE TestVet/6
+=== CONT TestTags/x,testtag,y
+=== PAUSE TestVetDirs/testingpkg
+=== RUN TestVetDirs/divergent
+=== RUN TestVet/7
+=== PAUSE TestVet/7
+=== PAUSE TestVetDirs/divergent
+=== CONT TestTags/x_testtag_y
+=== CONT TestTags/testtag
+=== RUN TestVetDirs/buildtag
+=== PAUSE TestVetDirs/buildtag
+=== CONT TestVet/0
+=== CONT TestVet/4
+=== RUN TestVetDirs/incomplete
+=== PAUSE TestVetDirs/incomplete
+=== RUN TestVetDirs/cgo
+=== PAUSE TestVetDirs/cgo
+=== CONT TestVet/7
+=== CONT TestVet/6
+--- PASS: TestVetVerbose (0.04s)
+=== CONT TestVet/5
+=== CONT TestVet/3
+=== CONT TestVet/2
+--- PASS: TestTags (0.00s)
+ --- PASS: TestTags/x_testtag_y (0.04s)
+ vet_test.go:187: -tags=x testtag y
+ --- PASS: TestTags/x,testtag,y (0.04s)
+ vet_test.go:187: -tags=x,testtag,y
+ --- PASS: TestTags/testtag (0.04s)
+ vet_test.go:187: -tags=testtag
+=== CONT TestVet/1
+=== CONT TestVetDirs/testingpkg
+=== CONT TestVetDirs/buildtag
+=== CONT TestVetDirs/divergent
+=== CONT TestVetDirs/incomplete
+=== CONT TestVetDirs/cgo
+--- PASS: TestVet (0.39s)
+ --- PASS: TestVet/5 (0.07s)
+ vet_test.go:114: files: ["testdata/copylock_func.go" "testdata/rangeloop.go"]
+ --- PASS: TestVet/3 (0.07s)
+ vet_test.go:114: files: ["testdata/composite.go" "testdata/nilfunc.go"]
+ --- PASS: TestVet/6 (0.07s)
+ vet_test.go:114: files: ["testdata/copylock_range.go" "testdata/shadow.go"]
+ --- PASS: TestVet/2 (0.07s)
+ vet_test.go:114: files: ["testdata/bool.go" "testdata/method.go" "testdata/unused.go"]
+ --- PASS: TestVet/0 (0.13s)
+ vet_test.go:114: files: ["testdata/assign.go" "testdata/httpresponse.go" "testdata/structtag.go"]
+ --- PASS: TestVet/4 (0.16s)
+ vet_test.go:114: files: ["testdata/copylock.go" "testdata/print.go"]
+ --- PASS: TestVet/1 (0.07s)
+ vet_test.go:114: files: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"]
+ --- PASS: TestVet/7 (0.19s)
+ vet_test.go:114: files: ["testdata/deadcode.go" "testdata/shift.go"]
+--- PASS: TestVetDirs (0.01s)
+ --- PASS: TestVetDirs/testingpkg (0.06s)
+ --- PASS: TestVetDirs/divergent (0.05s)
+ --- PASS: TestVetDirs/buildtag (0.06s)
+ --- PASS: TestVetDirs/incomplete (0.05s)
+ --- PASS: TestVetDirs/cgo (0.04s)
+--- PASS: TestVetAsm (0.75s)
+PASS
+ok cmd/vet (cached)
+
diff --git a/debug/logfilter/example2.out.expected b/debug/logfilter/example2.out.expected
new file mode 100644
index 000000000..767f8b1d1
--- /dev/null
+++ b/debug/logfilter/example2.out.expected
@@ -0,0 +1,22 @@
+--- PASS: TestVetVerbose (0.04s)
+--- PASS: TestTags (0.00s)
+ --- PASS: TestTags/x_testtag_y (0.04s)
+ --- PASS: TestTags/x,testtag,y (0.04s)
+ --- PASS: TestTags/testtag (0.04s)
+--- PASS: TestVet (0.39s)
+ --- PASS: TestVet/5 (0.07s)
+ --- PASS: TestVet/3 (0.07s)
+ --- PASS: TestVet/6 (0.07s)
+ --- PASS: TestVet/2 (0.07s)
+ --- PASS: TestVet/0 (0.13s)
+ --- PASS: TestVet/4 (0.16s)
+ --- PASS: TestVet/1 (0.07s)
+ --- PASS: TestVet/7 (0.19s)
+--- PASS: TestVetDirs (0.01s)
+ --- PASS: TestVetDirs/testingpkg (0.06s)
+ --- PASS: TestVetDirs/divergent (0.05s)
+ --- PASS: TestVetDirs/buildtag (0.06s)
+ --- PASS: TestVetDirs/incomplete (0.05s)
+ --- PASS: TestVetDirs/cgo (0.04s)
+--- PASS: TestVetAsm (0.75s)
+ok cmd/vet (cached)
diff --git a/debug/logfilter/example3.in b/debug/logfilter/example3.in
new file mode 100644
index 000000000..76b7f7ec5
--- /dev/null
+++ b/debug/logfilter/example3.in
@@ -0,0 +1,417 @@
+=== RUN TestGoalWithExpect
+=== RUN TestGoalWithExpect/catchpointCatchupTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/catchpointCatchupTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/createWalletTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/createWalletTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/doubleSpendingTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/doubleSpendingTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalFormattingTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalFormattingTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeSystemdTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeSystemdTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealAppInfoTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/statefulTealAppInfoTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/corsTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/corsTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAppAccountAddressTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAppAccountAddressTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/listExpiredParticipationKeyTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/listExpiredParticipationKeyTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeStatusTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeStatusTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalTxValidityTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalTxValidityTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/ledgerTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/ledgerTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/tealAndStatefulTealTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1
+ stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod
+ TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ network create test_net_expect_1617038539
+ spawn goal network create --network test_net_expect_1617038539 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.rootkey
+
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.rootkey
+
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey
+
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey
+
+ future 100000
+
+ Network test_net_expect_1617038539 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ network start test_net_expect_1617038539
+ spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ network status test_net_expect_1617038539
+ spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+
+
+ [Primary]
+
+ Last committed block: 0
+
+ Time since last block: 0.0s
+
+ Sync Time: 0.0s
+
+ Last consensus protocol: future
+
+ Next consensus protocol: future
+
+ Round for next consensus protocol: 1
+
+ Next consensus protocol supported: true
+
+
+
+ [Node]
+
+ Last committed block: 0
+
+ Time since last block: 0.0s
+
+ Sync Time: 0.2s
+
+ Last consensus protocol: future
+
+ Next consensus protocol: future
+
+ Round for next consensus protocol: 1
+
+ Next consensus protocol supported: true
+
+
+
+ StartNetwork complete
+ Primary node address is: 127.0.0.1:34369
+ Primary Node Address: 127.0.0.1:34369
+ spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ [online] IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A 5000000000000000 microAlgos
+
+ Account Address: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A Balance: 5000000000000000
+ spawn goal account balance -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ 5000000000000000 microAlgos
+
+ Wallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Balance: 5000000000000000
+ Primary Account Balance: 5000000000000000
+ spawn goal account rewards -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ 0 microAlgosWallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Rewards: 0
+ Primary Account Rewards: 0
+ spawn goal wallet new Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please choose a password for wallet 'Wallet_1_1617038539':
+
+ Please confirm the password:
+
+ Creating wallet...
+
+ Created wallet 'Wallet_1_1617038539'
+
+ Your new wallet has a backup phrase that can be used for recovery.
+
+ Keeping this backup phrase safe is extremely important.
+
+ Would you like to see it now? (Y/n): y
+
+ Your backup phrase is printed below.
+
+ Keep this information safe -- never share it with anyone!
+
+
+
+ One or more non-printable characters were ommited from the subsequent line:
+
+ [32mtest faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue[0m
+
+ WALLET_1_PASSPHRASE: test faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ ##################################################
+
+ Wallet: Wallet_1_1617038539
+
+ ID: f6c68ab0105dccf477e2dc3de44dda18
+
+ ##################################################
+
+ Wallet: unencrypted-default-wallet
+
+ ID: 2bc05b49cc7176c389a384a28d622f90
+
+ spawn goal account new -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please enter the password for wallet 'Wallet_1_1617038539':
+
+ Created new account with address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI
+
+ Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI
+ spawn goal account list -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ [offline] Unnamed-0 UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI 0 microAlgos *Default
+
+ Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI
+ spawn goal wallet new Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please choose a password for wallet 'Wallet_2_1617038539':
+
+ Please confirm the password:
+
+ Creating wallet...
+
+ Created wallet 'Wallet_2_1617038539'
+
+ Your new wallet has a backup phrase that can be used for recovery.
+
+ Keeping this backup phrase safe is extremely important.
+
+ Would you like to see it now? (Y/n): y
+
+ Your backup phrase is printed below.
+
+ Keep this information safe -- never share it with anyone!
+
+
+
+ One or more non-printable characters were ommited from the subsequent line:
+
+ [32mpowder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect[0m
+
+ WALLET_2_PASSPHRASE: powder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ ##################################################
+
+ Wallet: Wallet_1_1617038539
+
+ ID: f6c68ab0105dccf477e2dc3de44dda18
+
+ ##################################################
+
+ Wallet: Wallet_2_1617038539
+
+ ID: d3768bb0e3c128910cc02e0bc2b357de
+
+ spawn goal account new -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please enter the password for wallet 'Wallet_2_1617038539':
+
+ Created new account with address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI
+
+ Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI
+ spawn goal account list -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ [offline] Unnamed-1 OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI 0 microAlgos
+
+ Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI, transaction ID: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ. Fee set to 1000
+
+ Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ still pending as of round 4
+
+ Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ committed in round 6
+
+ TRANSACTION_ID 1: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ
+ spawn goal account balance -a UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ 1000000000 microAlgos
+
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI, transaction ID: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ. Fee set to 1000
+
+ Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ still pending as of round 6
+
+ Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ committed in round 8
+
+ TRANSACTION_ID 2: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ
+ spawn goal account balance -a OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ 1000000000 microAlgos
+
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work
+
+ writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal'
+ reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal
+ #pragma version 2
+ int 1
+
+ spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal
+
+ /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ. Fee set to 1000
+
+ Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ still pending as of round 8
+
+ Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ committed in round 10
+
+ TRANSACTION_ID_APP: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+ spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ 1000000000 microAlgos
+
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ calling app create
+ calling goal app create
+ spawn goal app create --creator IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+
+ Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A)
+
+ Issued transaction from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, txid FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ (fee 1000)
+
+ Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ still pending as of round 11
+
+ Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ committed in round 13
+
+ Created app with app index 4
+
+ App ID 4
+ spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i unsginedtransaction1.tx -o sginedtransaction1.tx
+
+ Please enter the password for wallet 'Wallet_1_1617038539':
+
+ form combined transaction
+ create group transaction
+ spawn goal clerk group -i combinedtransactions.tx -o groupedtransactions.tx
+
+ split transaction
+ spawn goal clerk split -i groupedtransactions.tx -o split.tx
+
+ Wrote transaction 0 to split-0.tx
+
+ Wrote transaction 1 to split-1.tx
+
+ sign the split transaction
+ spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i split-0.tx -o signout-0.tx
+
+ Aborting with Error: Timed out signing transaction
+ GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ GLOBAL_NETWORK_NAME test_net_expect_1617038539
+ Stopping network: test_net_expect_1617038539
+ spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ stderr:
+=== RUN TestGoalWithExpect/goalAccountTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAccountTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealAppReadTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/statefulTealAppReadTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealCreateAppTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/statefulTealCreateAppTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/basicExpectTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/basicExpectTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/basicGoalTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/basicGoalTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAssetTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAssetTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalClerkGroupTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalClerkGroupTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeConnectionTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeConnectionTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/multisigCreationDeletionTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/multisigCreationDeletionTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/pingpongTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/pingpongTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAccountInfoTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAccountInfoTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalDryrunRestTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalDryrunRestTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/limitOrderTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/limitOrderTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/reportTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/reportTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/tealConsensusTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealConsensusTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalCmdFlagsTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalCmdFlagsTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/testInfraTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/testInfraTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+--- FAIL: TestGoalWithExpect (1991.52s)
+ --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (131.36s)
+ --- PASS: TestGoalWithExpect/createWalletTest.exp (103.91s)
+ --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (63.10s)
+ --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.25s)
+ --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.24s)
+ --- PASS: TestGoalWithExpect/goalNodeTest.exp (16.24s)
+ --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (23.72s)
+ --- PASS: TestGoalWithExpect/corsTest.exp (8.28s)
+ --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (90.79s)
+ --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (74.16s)
+ --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (19.26s)
+ --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (9.42s)
+ --- PASS: TestGoalWithExpect/ledgerTest.exp (7.30s)
+ --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (83.90s)
+ --- PASS: TestGoalWithExpect/goalAccountTest.exp (91.38s)
+ --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (100.50s)
+ --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (276.31s)
+ --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.03s)
+ --- PASS: TestGoalWithExpect/basicGoalTest.exp (52.36s)
+ --- PASS: TestGoalWithExpect/goalAssetTest.exp (24.90s)
+ --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (18.57s)
+ --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (7.81s)
+ --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (11.85s)
+ --- PASS: TestGoalWithExpect/pingpongTest.exp (516.38s)
+ --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (111.10s)
+ --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (25.73s)
+ --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.48s)
+ --- PASS: TestGoalWithExpect/reportTest.exp (5.93s)
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (5.96s)
+ --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.76s)
+ --- PASS: TestGoalWithExpect/testInfraTest.exp (2.52s)
+FAIL
+FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1991.565s
+
diff --git a/debug/logfilter/example3.out.expected b/debug/logfilter/example3.out.expected
new file mode 100644
index 000000000..c2f2f5818
--- /dev/null
+++ b/debug/logfilter/example3.out.expected
@@ -0,0 +1,206 @@
+
+--- FAIL: TestGoalWithExpect (1991.52s)
+ --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (131.36s)
+ --- PASS: TestGoalWithExpect/createWalletTest.exp (103.91s)
+ --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (63.10s)
+ --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.25s)
+ --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.24s)
+ --- PASS: TestGoalWithExpect/goalNodeTest.exp (16.24s)
+ --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (23.72s)
+ --- PASS: TestGoalWithExpect/corsTest.exp (8.28s)
+ --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (90.79s)
+ --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (74.16s)
+ --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (19.26s)
+ --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (9.42s)
+ --- PASS: TestGoalWithExpect/ledgerTest.exp (7.30s)
+
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1
+ stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod
+ TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ network create test_net_expect_1617038539
+ spawn goal network create --network test_net_expect_1617038539 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.rootkey
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.rootkey
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey
+ future 100000
+ Network test_net_expect_1617038539 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ network start test_net_expect_1617038539
+ spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ network status test_net_expect_1617038539
+ spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ [Primary]
+ Last committed block: 0
+ Time since last block: 0.0s
+ Sync Time: 0.0s
+ Last consensus protocol: future
+ Next consensus protocol: future
+ Round for next consensus protocol: 1
+ Next consensus protocol supported: true
+
+ [Node]
+ Last committed block: 0
+ Time since last block: 0.0s
+ Sync Time: 0.2s
+ Last consensus protocol: future
+ Next consensus protocol: future
+ Round for next consensus protocol: 1
+ Next consensus protocol supported: true
+
+ StartNetwork complete
+ Primary node address is: 127.0.0.1:34369
+ Primary Node Address: 127.0.0.1:34369
+ spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ [online] IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A 5000000000000000 microAlgos
+ Account Address: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A Balance: 5000000000000000
+ spawn goal account balance -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ 5000000000000000 microAlgos
+ Wallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Balance: 5000000000000000
+ Primary Account Balance: 5000000000000000
+ spawn goal account rewards -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ 0 microAlgosWallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Rewards: 0
+ Primary Account Rewards: 0
+ spawn goal wallet new Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Please choose a password for wallet 'Wallet_1_1617038539':
+ Please confirm the password:
+ Creating wallet...
+ Created wallet 'Wallet_1_1617038539'
+ Your new wallet has a backup phrase that can be used for recovery.
+ Keeping this backup phrase safe is extremely important.
+ Would you like to see it now? (Y/n): y
+ Your backup phrase is printed below.
+ Keep this information safe -- never share it with anyone!
+
+ One or more non-printable characters were ommited from the subsequent line:
+ [32mtest faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue[0m
+ WALLET_1_PASSPHRASE: test faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ ##################################################
+ Wallet: Wallet_1_1617038539
+ ID: f6c68ab0105dccf477e2dc3de44dda18
+ ##################################################
+ Wallet: unencrypted-default-wallet
+ ID: 2bc05b49cc7176c389a384a28d622f90
+ spawn goal account new -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Please enter the password for wallet 'Wallet_1_1617038539':
+ Created new account with address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI
+ Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI
+ spawn goal account list -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ [offline] Unnamed-0 UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI 0 microAlgos *Default
+ Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI
+ spawn goal wallet new Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Please choose a password for wallet 'Wallet_2_1617038539':
+ Please confirm the password:
+ Creating wallet...
+ Created wallet 'Wallet_2_1617038539'
+ Your new wallet has a backup phrase that can be used for recovery.
+ Keeping this backup phrase safe is extremely important.
+ Would you like to see it now? (Y/n): y
+ Your backup phrase is printed below.
+ Keep this information safe -- never share it with anyone!
+
+ One or more non-printable characters were ommited from the subsequent line:
+ [32mpowder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect[0m
+ WALLET_2_PASSPHRASE: powder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ ##################################################
+ Wallet: Wallet_1_1617038539
+ ID: f6c68ab0105dccf477e2dc3de44dda18
+ ##################################################
+ Wallet: Wallet_2_1617038539
+ ID: d3768bb0e3c128910cc02e0bc2b357de
+ spawn goal account new -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Please enter the password for wallet 'Wallet_2_1617038539':
+ Created new account with address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI
+ Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI
+ spawn goal account list -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ [offline] Unnamed-1 OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI 0 microAlgos
+ Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI, transaction ID: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ. Fee set to 1000
+ Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ still pending as of round 4
+ Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ committed in round 6
+ TRANSACTION_ID 1: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ
+ spawn goal account balance -a UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ 1000000000 microAlgos
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI, transaction ID: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ. Fee set to 1000
+ Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ still pending as of round 6
+ Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ committed in round 8
+ TRANSACTION_ID 2: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ
+ spawn goal account balance -a OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ 1000000000 microAlgos
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work
+
+ writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal'
+ reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal
+ #pragma version 2
+ int 1
+
+ spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal
+ /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ. Fee set to 1000
+ Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ still pending as of round 8
+ Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ committed in round 10
+ TRANSACTION_ID_APP: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+ spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ 1000000000 microAlgos
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ calling app create
+ calling goal app create
+ spawn goal app create --creator IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/
+ Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A)
+ Issued transaction from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, txid FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ (fee 1000)
+ Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ still pending as of round 11
+ Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ committed in round 13
+ Created app with app index 4
+ App ID 4
+ spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i unsginedtransaction1.tx -o sginedtransaction1.tx
+ Please enter the password for wallet 'Wallet_1_1617038539':
+ form combined transaction
+ create group transaction
+ spawn goal clerk group -i combinedtransactions.tx -o groupedtransactions.tx
+ split transaction
+ spawn goal clerk split -i groupedtransactions.tx -o split.tx
+ Wrote transaction 0 to split-0.tx
+ Wrote transaction 1 to split-1.tx
+ sign the split transaction
+ spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i split-0.tx -o signout-0.tx
+ Aborting with Error: Timed out signing transaction
+ GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ GLOBAL_NETWORK_NAME test_net_expect_1617038539
+ Stopping network: test_net_expect_1617038539
+ spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root
+
+ stderr:
+ --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (83.90s)
+ --- PASS: TestGoalWithExpect/goalAccountTest.exp (91.38s)
+ --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (100.50s)
+ --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (276.31s)
+ --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.03s)
+ --- PASS: TestGoalWithExpect/basicGoalTest.exp (52.36s)
+ --- PASS: TestGoalWithExpect/goalAssetTest.exp (24.90s)
+ --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (18.57s)
+ --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (7.81s)
+ --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (11.85s)
+ --- PASS: TestGoalWithExpect/pingpongTest.exp (516.38s)
+ --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (111.10s)
+ --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (25.73s)
+ --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.48s)
+ --- PASS: TestGoalWithExpect/reportTest.exp (5.93s)
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (5.96s)
+ --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.76s)
+ --- PASS: TestGoalWithExpect/testInfraTest.exp (2.52s)
+FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1991.565s
diff --git a/debug/logfilter/example4.in b/debug/logfilter/example4.in
new file mode 100644
index 000000000..fb04351de
--- /dev/null
+++ b/debug/logfilter/example4.in
@@ -0,0 +1,21 @@
+=== RUN TestAlgodLogsToFile
+=== PAUSE TestAlgodLogsToFile
+=== CONT TestAlgodLogsToFile
+Created new rootkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet2.rootkey
+Created new rootkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet1.rootkey
+Created new partkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet1.0.3000000.partkey
+Created new partkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(3523) : No REST API Token found. Generated token: 48ccf1649eeda3dcac44b414e134f5fad0cd12fbd01097386a44e6569cfe1404
+algod(3523) : No Admin REST API Token found. Generated token: 1427e9e6cb62beeabb825265ccf2d07d57ad4ca904275f83cb5fc28a539589cb
+algod(3523) : Logging to: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Primary/node.log
+algod(3523) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3523) : Initializing the Algorand node...
+algod(3523) : Success!
+algod(3523) : ⇨ http server started on 127.0.0.1:8080
+algod(3523) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(3523) : Exiting on terminated
+--- PASS: TestAlgodLogsToFile (6.64s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 6.744s
+
diff --git a/debug/logfilter/example4.out.expected b/debug/logfilter/example4.out.expected
new file mode 100644
index 000000000..b85960134
--- /dev/null
+++ b/debug/logfilter/example4.out.expected
@@ -0,0 +1,2 @@
+--- PASS: TestAlgodLogsToFile (6.64s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 6.744s
diff --git a/debug/logfilter/example5.in b/debug/logfilter/example5.in
new file mode 100644
index 000000000..599b6e0ba
--- /dev/null
+++ b/debug/logfilter/example5.in
@@ -0,0 +1,128 @@
+=== RUN TestGoalWithExpect
+=== RUN TestGoalWithExpect/basicExpectTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/basicExpectTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/corsTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/corsTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalClerkGroupTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalClerkGroupTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/listExpiredParticipationKeyTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/listExpiredParticipationKeyTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/testInfraTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/testInfraTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/doubleSpendingTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/doubleSpendingTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeStatusTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeStatusTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeSystemdTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeSystemdTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAssetTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAssetTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalFormattingTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalFormattingTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeConnectionTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeConnectionTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealCreateAppTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/statefulTealCreateAppTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/reportTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/reportTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealAppInfoTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/statefulTealAppInfoTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalDryrunRestTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalDryrunRestTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalTxValidityTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalTxValidityTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/limitOrderTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/limitOrderTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/basicGoalTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/basicGoalTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/catchpointCatchupTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/catchpointCatchupTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/createWalletTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/createWalletTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAccountInfoTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAccountInfoTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAppAccountAddressTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAppAccountAddressTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/pingpongTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/pingpongTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/tealConsensusTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/tealConsensusTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAccountTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAccountTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealAppReadTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/statefulTealAppReadTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/tealAndStatefulTealTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/tealAndStatefulTealTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalCmdFlagsTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalCmdFlagsTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/ledgerTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/ledgerTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/multisigCreationDeletionTest.exp
+ expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/multisigCreationDeletionTest/algod
+ testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata
+--- PASS: TestGoalWithExpect (1412.29s)
+ --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.05s)
+ --- PASS: TestGoalWithExpect/corsTest.exp (10.75s)
+ --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (11.14s)
+ --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (73.87s)
+ --- PASS: TestGoalWithExpect/testInfraTest.exp (2.15s)
+ --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (48.80s)
+ --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (21.91s)
+ --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.09s)
+ --- PASS: TestGoalWithExpect/goalAssetTest.exp (19.00s)
+ --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.51s)
+ --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (9.62s)
+ --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (179.12s)
+ --- PASS: TestGoalWithExpect/goalNodeTest.exp (14.21s)
+ --- PASS: TestGoalWithExpect/reportTest.exp (10.75s)
+ --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (20.44s)
+ --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (16.22s)
+ --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (11.10s)
+ --- PASS: TestGoalWithExpect/limitOrderTest.exp (87.35s)
+ --- PASS: TestGoalWithExpect/basicGoalTest.exp (30.76s)
+ --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (88.84s)
+ --- PASS: TestGoalWithExpect/createWalletTest.exp (87.54s)
+ --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (91.66s)
+ --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (74.24s)
+ --- PASS: TestGoalWithExpect/pingpongTest.exp (356.50s)
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (11.29s)
+ --- PASS: TestGoalWithExpect/goalAccountTest.exp (23.35s)
+ --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (34.37s)
+ --- PASS: TestGoalWithExpect/tealAndStatefulTealTest.exp (52.53s)
+ --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.38s)
+ --- PASS: TestGoalWithExpect/ledgerTest.exp (10.54s)
+ --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (13.22s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1412.538s
diff --git a/debug/logfilter/example5.out.expected b/debug/logfilter/example5.out.expected
new file mode 100644
index 000000000..046e2dd2e
--- /dev/null
+++ b/debug/logfilter/example5.out.expected
@@ -0,0 +1,33 @@
+--- PASS: TestGoalWithExpect (1412.29s)
+ --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.05s)
+ --- PASS: TestGoalWithExpect/corsTest.exp (10.75s)
+ --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (11.14s)
+ --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (73.87s)
+ --- PASS: TestGoalWithExpect/testInfraTest.exp (2.15s)
+ --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (48.80s)
+ --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (21.91s)
+ --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.09s)
+ --- PASS: TestGoalWithExpect/goalAssetTest.exp (19.00s)
+ --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.51s)
+ --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (9.62s)
+ --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (179.12s)
+ --- PASS: TestGoalWithExpect/goalNodeTest.exp (14.21s)
+ --- PASS: TestGoalWithExpect/reportTest.exp (10.75s)
+ --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (20.44s)
+ --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (16.22s)
+ --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (11.10s)
+ --- PASS: TestGoalWithExpect/limitOrderTest.exp (87.35s)
+ --- PASS: TestGoalWithExpect/basicGoalTest.exp (30.76s)
+ --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (88.84s)
+ --- PASS: TestGoalWithExpect/createWalletTest.exp (87.54s)
+ --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (91.66s)
+ --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (74.24s)
+ --- PASS: TestGoalWithExpect/pingpongTest.exp (356.50s)
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (11.29s)
+ --- PASS: TestGoalWithExpect/goalAccountTest.exp (23.35s)
+ --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (34.37s)
+ --- PASS: TestGoalWithExpect/tealAndStatefulTealTest.exp (52.53s)
+ --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.38s)
+ --- PASS: TestGoalWithExpect/ledgerTest.exp (10.54s)
+ --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (13.22s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1412.538s
diff --git a/debug/logfilter/example6.in b/debug/logfilter/example6.in
new file mode 100644
index 000000000..ab22b4c54
--- /dev/null
+++ b/debug/logfilter/example6.in
@@ -0,0 +1,2619 @@
+=== RUN TestNodeControllerCleanup
+=== PAUSE TestNodeControllerCleanup
+=== RUN TestAlgodLogsToFile
+=== PAUSE TestAlgodLogsToFile
+=== CONT TestAlgodLogsToFile
+=== CONT TestNodeControllerCleanup
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Offline.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Rich.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Online.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Partkey.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Online.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Partkey.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Rich.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(24836) : No REST API Token found. Generated token: 51a8e4dededbe29ed75d6c150b5476386046f063de4da4339799e045a8ab59a4
+algod(24836) : No Admin REST API Token found. Generated token: 5747d77952ad65ab4b88fb4e3f4a804d69753fb96c3eaf09c55ad17ab58cef39
+algod(24836) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Primary/node.log
+algod(24841) : No REST API Token found. Generated token: 7d99abec3e49d3d0daea14a6d107bbc17de8c60f4784c4abf9910bca248ee1c3
+algod(24841) : No Admin REST API Token found. Generated token: e73fc1607e8c0bc14b5559ef61ac551c3facdd1cd4bf7df0566ffcdfe5158873
+algod(24841) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Primary/node.log
+algod(24836) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(24836) : Initializing the Algorand node...
+algod(24841) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(24841) : Initializing the Algorand node...
+algod(24841) : Success!
+algod(24836) : Success!
+algod(24836) : ⇨ http server started on 127.0.0.1:8080
+algod(24841) : ⇨ http server started on 127.0.0.1:46195
+algod(24841) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46195. Press Ctrl-C to exit
+algod(24836) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(24867) : No REST API Token found. Generated token: 385d64cae2bd97c5635171413b0353dab92c13d810979de5ad5e7b3e7323efcb
+algod(24867) : No Admin REST API Token found. Generated token: c28e99b75ebad7597b9568ae940c8e5de27f06a65d6b9f5c91c8b40d0bd29cbf
+algod(24867) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Node/node.log
+algod(24867) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(24867) : Initializing the Algorand node...
+algod(24867) : Success!
+algod(24867) : ⇨ http server started on 127.0.0.1:45295
+algod(24867) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45295. Press Ctrl-C to exit
+algod(24841) : Exiting on terminated
+--- PASS: TestAlgodLogsToFile (1.25s)
+algod(24867) : Exiting on terminated
+algod(24836) : Exiting on terminated
+--- PASS: TestNodeControllerCleanup (5.44s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 5.507s
+=== RUN TestAlgodWithExpect
+=== RUN TestAlgodWithExpect/algodTelemetryLocationTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/algodTelemetryLocationTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+--- PASS: TestAlgodWithExpect (0.07s)
+ --- PASS: TestAlgodWithExpect/algodTelemetryLocationTest.exp (0.07s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algod/expect 0.150s
+=== RUN TestAlgohWithExpect
+=== RUN TestAlgohWithExpect/algohTimeoutTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/algohTimeoutTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+--- PASS: TestAlgohWithExpect (213.98s)
+ --- PASS: TestAlgohWithExpect/algohTimeoutTest.exp (213.98s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algoh/expect 214.091s
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet1.0.3000000.partkey
+algod(25012) : No REST API Token found. Generated token: d707b0702703c34a98760aed697b86af287935b85617880b358980aa41787399
+algod(25012) : No Admin REST API Token found. Generated token: 619b762ae4744299fd76b696b3d2980328b792035ac6be23e7a5977775712e33
+algod(25012) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Primary/node.log
+algod(25012) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(25012) : Initializing the Algorand node...
+algod(25012) : Success!
+algod(25012) : ⇨ http server started on 127.0.0.1:8080
+algod(25012) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(25019) : No REST API Token found. Generated token: 0ed2f171733f8c77c8e70171a1db302871f3c831081ec593a2c0ff9a5197069d
+algod(25019) : No Admin REST API Token found. Generated token: 43b07c23bae76ae7fbf5bbfb0c7785654fa68fc83910af3e3e32ff6ff13f86d1
+algod(25019) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Node/node.log
+algod(25019) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(25019) : Initializing the Algorand node...
+algod(25019) : Success!
+algod(25019) : ⇨ http server started on 127.0.0.1:44729
+algod(25019) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44729. Press Ctrl-C to exit
+=== RUN TestAccountNew
+--- PASS: TestAccountNew (1.75s)
+=== RUN TestAccountNewDuplicateFails
+--- PASS: TestAccountNewDuplicateFails (0.30s)
+=== RUN TestAccountRename
+--- PASS: TestAccountRename (0.85s)
+=== RUN TestAccountMultipleImportRootKey
+--- PASS: TestAccountMultipleImportRootKey (0.49s)
+=== RUN TestClerkSendNoteEncoding
+--- PASS: TestClerkSendNoteEncoding (18.65s)
+=== RUN TestGoalNodeCleanup
+algod(25012) : Exiting on terminated
+--- PASS: TestGoalNodeCleanup (0.44s)
+PASS
+algod(25019) : Exiting on terminated
+ok github.com/algorand/go-algorand/test/e2e-go/cli/goal 26.354s
+=== RUN TestGoalWithExpect
+=== RUN TestGoalWithExpect/basicGoalTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/basicGoalTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAppAccountAddressTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAppAccountAddressTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeConnectionTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeConnectionTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeSystemdTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeSystemdTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/multisigCreationDeletionTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/multisigCreationDeletionTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/reportTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/reportTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealAppInfoTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/statefulTealAppInfoTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/basicExpectTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/basicExpectTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/listExpiredParticipationKeyTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/listExpiredParticipationKeyTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/doubleSpendingTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/doubleSpendingTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAccountTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAccountTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalCmdFlagsTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalCmdFlagsTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalDryrunRestTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalDryrunRestTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalFormattingTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalFormattingTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/limitOrderTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/limitOrderTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/tealConsensusTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealConsensusTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAssetTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAssetTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/ledgerTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/ledgerTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/tealAndStatefulTealTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1
+ stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod
+ TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ network create test_net_expect_1617230151
+ spawn goal network create --network test_net_expect_1617230151 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.rootkey
+
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.rootkey
+
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey
+
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey
+
+ future 100000
+
+ Network test_net_expect_1617230151 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ network start test_net_expect_1617230151
+ spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ network status test_net_expect_1617230151
+ spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+
+
+ [Primary]
+
+ Last committed block: 0
+
+ Time since last block: 0.0s
+
+ Sync Time: 0.0s
+
+ Last consensus protocol: future
+
+ Next consensus protocol: future
+
+ Round for next consensus protocol: 1
+
+ Next consensus protocol supported: true
+
+
+
+ [Node]
+
+ Last committed block: 0
+
+ Time since last block: 0.0s
+
+ Sync Time: 0.6s
+
+ Last consensus protocol: future
+
+ Next consensus protocol: future
+
+ Round for next consensus protocol: 1
+
+ Next consensus protocol supported: true
+
+
+
+ StartNetwork complete
+ Primary node address is: 127.0.0.1:43613
+ Primary Node Address: 127.0.0.1:43613
+ spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ [online] W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE 5000000000000000 microAlgos
+
+ Account Address: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE Balance: 5000000000000000
+ spawn goal account balance -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ 5000000000000000 microAlgos
+
+ Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Balance: 5000000000000000
+ Primary Account Balance: 5000000000000000
+ spawn goal account rewards -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ 0 microAlgos
+
+ Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Rewards: 0
+ Primary Account Rewards: 0
+ spawn goal wallet new Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please choose a password for wallet 'Wallet_1_1617230151':
+
+ Please confirm the password:
+
+ Creating wallet...
+
+ Created wallet 'Wallet_1_1617230151'
+
+ Your new wallet has a backup phrase that can be used for recovery.
+
+ Keeping this backup phrase safe is extremely important.
+
+ Would you like to see it now? (Y/n): y
+
+ Your backup phrase is printed below.
+
+ Keep this information safe -- never share it with anyone!
+
+
+
+ One or more non-printable characters were ommited from the subsequent line:
+
+ [32mattract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret[0mWALLET_1_PASSPHRASE: attract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ ##################################################
+
+ Wallet: Wallet_1_1617230151
+
+ ID: 12dd4a15929ae17827788883ca77479d
+
+ ##################################################
+
+ Wallet: unencrypted-default-wallet
+
+ ID: ec9a33b376e4635705e1339deb6e799b
+
+ spawn goal account new -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please enter the password for wallet 'Wallet_1_1617230151':
+
+ Created new account with address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ
+
+ Account Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ
+ spawn goal account list -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ [offline] Unnamed-0 GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ 0 microAlgosAccount Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ
+ spawn goal wallet new Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please choose a password for wallet 'Wallet_2_1617230151':
+
+ Please confirm the password:
+
+ Creating wallet...
+
+ Created wallet 'Wallet_2_1617230151'
+
+ Your new wallet has a backup phrase that can be used for recovery.
+
+ Keeping this backup phrase safe is extremely important.
+
+ Would you like to see it now? (Y/n): y
+
+ Your backup phrase is printed below.
+
+ Keep this information safe -- never share it with anyone!
+
+
+
+ One or more non-printable characters were ommited from the subsequent line:
+
+ [32mcasual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump[0m
+
+ WALLET_2_PASSPHRASE: casual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ ##################################################
+
+ Wallet: Wallet_1_1617230151
+
+ ID: 12dd4a15929ae17827788883ca77479d
+
+ ##################################################
+
+ Wallet: Wallet_2_1617230151
+
+ ID: 2edbca9e4d78d43556f46cc991415da5
+
+ spawn goal account new -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Please enter the password for wallet 'Wallet_2_1617230151': 12345678
+
+
+
+ Created new account with address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UAAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA
+ spawn goal account list -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ [offline] Unnamed-1 F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA 0 microAlgosAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ, transaction ID: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA. Fee set to 1000
+
+ Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA still pending as of round 8
+
+ Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA committed in round 10
+
+ TRANSACTION_ID 1: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA
+ spawn goal account balance -a GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ 1000000000 microAlgos
+
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA, transaction ID: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ. Fee set to 1000
+
+ Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ still pending as of round 11
+
+ Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ committed in round 13
+
+ TRANSACTION_ID 2: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ
+ spawn goal account balance -a F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ 1000000000 microAlgos
+
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work
+
+ writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal'
+ reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal
+ #pragma version 2
+ int 1
+
+ spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal
+
+ /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA. Fee set to 1000
+
+ Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA still pending as of round 14
+
+ Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA committed in round 16
+
+ TRANSACTION_ID_APP: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+ spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ 1000000000 microAlgos
+
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ calling app create
+ calling goal app create
+ spawn goal app create --creator W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+
+ Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A)
+
+ Issued transaction from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, txid JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA (fee 1000)
+
+ Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA still pending as of round 19
+
+ Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA committed in round 21
+
+ Created app with app index 4App ID 4
+ spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617230151 -i unsginedtransaction1.tx -o sginedtransaction1.tx
+
+ Aborting with Error: Timed out signing transaction
+ GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ GLOBAL_NETWORK_NAME test_net_expect_1617230151
+ Stopping network: test_net_expect_1617230151
+ spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+
+
+ stderr:
+=== RUN TestGoalWithExpect/testInfraTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/testInfraTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/createWalletTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/createWalletTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalTxValidityTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalTxValidityTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/pingpongTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ expectFixture.go:157: err running 'pingpongTest.exp': exit status 1
+ stdout: starting pinpongTest
+ TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod
+ TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ network create test_net_expect_1617230521
+ spawn goal network create --network test_net_expect_1617230521 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.rootkey
+
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.rootkey
+
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.0.3000000.partkey
+
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.0.3000000.partkey
+
+ future 100000
+
+ Network test_net_expect_1617230521 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ network start test_net_expect_1617230521
+ spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ network status test_net_expect_1617230521
+ spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+
+
+ [Primary]
+
+ Last committed block: 0
+
+ Time since last block: 0.0s
+
+ Sync Time: 0.0s
+
+ Last consensus protocol: future
+
+ Next consensus protocol: future
+
+ Round for next consensus protocol: 1
+
+ Next consensus protocol supported: true
+
+
+
+ [Node]
+
+ Last committed block: 0
+
+ Time since last block: 0.0s
+
+ Sync Time: 0.7s
+
+ Last consensus protocol: future
+
+ Next consensus protocol: future
+
+ Round for next consensus protocol: 1
+
+ Next consensus protocol supported: true
+
+
+
+ StartNetwork complete
+ Primary node address is: 127.0.0.1:37299
+ Primary Node Address: 127.0.0.1:37299
+ spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/
+
+ [online] 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 5000000000000000 microAlgos
+
+ Account Address: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U Balance: 5000000000000000
+ spawn goal account balance -w unencrypted-default-wallet -a 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/
+
+ 5000000000000000 microAlgos
+
+ Wallet: unencrypted-default-wallet, Account: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U, Balance: 5000000000000000
+ Primary Account Balance: 5000000000000000
+ node status waiting for Round 1
+ spawn node status
+ node status check complete, current round is 0
+ Current Round: '0' is less than wait for round: '1'
+ sleep time 0
+ spawn node status
+ node status check complete, current round is 0
+ Current Round: '0' is less than wait for round: '1'
+ sleep time 1
+ spawn node status
+ Aborting with Error: goal node status timed out
+ GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+ GLOBAL_NETWORK_NAME test_net_expect_1617230521
+ Stopping network: test_net_expect_1617230521
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ stderr:
+=== RUN TestGoalWithExpect/catchpointCatchupTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/catchpointCatchupTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalClerkGroupTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalClerkGroupTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalNodeStatusTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeStatusTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealAppReadTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/statefulTealAppReadTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/corsTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/corsTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/goalAccountInfoTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAccountInfoTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestGoalWithExpect/statefulTealCreateAppTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/statefulTealCreateAppTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+--- FAIL: TestGoalWithExpect (1538.34s)
+ --- PASS: TestGoalWithExpect/basicGoalTest.exp (37.79s)
+ --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (75.84s)
+ --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (4.91s)
+ --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.19s)
+ --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (8.93s)
+ --- PASS: TestGoalWithExpect/reportTest.exp (6.20s)
+ --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (21.86s)
+ --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.01s)
+ --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (67.63s)
+ --- PASS: TestGoalWithExpect/goalNodeTest.exp (15.93s)
+ --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (59.13s)
+ --- PASS: TestGoalWithExpect/goalAccountTest.exp (24.57s)
+ --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.98s)
+ --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (37.95s)
+ --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.16s)
+ --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.89s)
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (12.31s)
+ --- PASS: TestGoalWithExpect/goalAssetTest.exp (41.96s)
+ --- PASS: TestGoalWithExpect/ledgerTest.exp (9.53s)
+ --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (115.77s)
+ --- PASS: TestGoalWithExpect/testInfraTest.exp (3.41s)
+ --- PASS: TestGoalWithExpect/createWalletTest.exp (243.76s)
+ --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (6.89s)
+ --- FAIL: TestGoalWithExpect/pingpongTest.exp (26.30s)
+ --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (207.24s)
+ --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (21.29s)
+ --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (18.58s)
+ --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (47.93s)
+ --- PASS: TestGoalWithExpect/corsTest.exp (9.63s)
+ --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (102.64s)
+ --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (201.07s)
+FAIL
+FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1538.381s
+? github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy [no test files]
+testing: warning: no tests to run
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/perf 0.131s [no tests to run]
+? github.com/algorand/go-algorand/test/e2e-go/cli/tealdbg/cdtmock [no test files]
+=== RUN TestTealdbgWithExpect
+=== RUN TestTealdbgWithExpect/tealdbgSpinoffTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealdbgSpinoffTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+=== RUN TestTealdbgWithExpect/tealdbgTest.exp
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealdbgTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+--- PASS: TestTealdbgWithExpect (3.29s)
+ --- PASS: TestTealdbgWithExpect/tealdbgSpinoffTest.exp (1.17s)
+ --- PASS: TestTealdbgWithExpect/tealdbgTest.exp (2.12s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/cli/tealdbg/expect 3.352s
+=== RUN TestStartAndCancelAuctionNoBids
+ auctionCancel_test.go:31:
+--- SKIP: TestStartAndCancelAuctionNoBids (0.00s)
+=== RUN TestStartAndCancelAuctionOneUserTenBids
+=== PAUSE TestStartAndCancelAuctionOneUserTenBids
+=== RUN TestStartAndCancelAuctionEarlyOneUserTenBids
+=== PAUSE TestStartAndCancelAuctionEarlyOneUserTenBids
+=== RUN TestInvalidDeposit
+ auctionErrors_test.go:32:
+--- SKIP: TestInvalidDeposit (0.00s)
+=== RUN TestNoDepositAssociatedWithBid
+ auctionErrors_test.go:123:
+--- SKIP: TestNoDepositAssociatedWithBid (0.00s)
+=== RUN TestDeadbeatBid
+=== PAUSE TestDeadbeatBid
+=== RUN TestStartAndPartitionAuctionTenUsersTenBidsEach
+ auctionErrors_test.go:290:
+--- SKIP: TestStartAndPartitionAuctionTenUsersTenBidsEach (0.00s)
+=== RUN TestStartAndEndAuctionNoBids
+ basicAuction_test.go:43:
+--- SKIP: TestStartAndEndAuctionNoBids (0.00s)
+=== RUN TestStartAndEndAuctionOneUserOneBid
+ basicAuction_test.go:84:
+--- SKIP: TestStartAndEndAuctionOneUserOneBid (0.00s)
+=== RUN TestStartAndEndAuctionOneUserTenBids
+ basicAuction_test.go:153:
+--- SKIP: TestStartAndEndAuctionOneUserTenBids (0.00s)
+=== RUN TestStartAndEndAuctionTenUsersOneBidEach
+=== PAUSE TestStartAndEndAuctionTenUsersOneBidEach
+=== RUN TestStartAndEndAuctionTenUsersTenBidsEach
+=== PAUSE TestStartAndEndAuctionTenUsersTenBidsEach
+=== RUN TestDecayingPrice
+=== PAUSE TestDecayingPrice
+=== CONT TestDeadbeatBid
+=== CONT TestStartAndEndAuctionTenUsersTenBidsEach
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(26744) : No REST API Token found. Generated token: 3a2ccc019ef899d0f74fc682edaae0fc21ebab457cc6d2124f6461b1fdbf4fce
+algod(26744) : No Admin REST API Token found. Generated token: 1e83a756b672c783104a82c4eb2c069b1210b1ed17d9e957d60a20a1b2a82c8f
+algod(26744) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Primary/node.log
+algod(26750) : No REST API Token found. Generated token: 3f0ea30738cb6ec35d5c22675c966f8d1650dac5a7f0c5c6e17db3db8319708d
+algod(26750) : No Admin REST API Token found. Generated token: 9c7928831784c226e9ca13227c002226b22198b589eb1161d1a8617f78390f11
+algod(26750) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Primary/node.log
+algod(26750) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(26750) : Initializing the Algorand node...
+algod(26744) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(26744) : Initializing the Algorand node...
+algod(26744) : Success!
+algod(26744) : ⇨ http server started on 127.0.0.1:44131
+algod(26744) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44131. Press Ctrl-C to exit
+algod(26750) : Success!
+algod(26750) : ⇨ http server started on 127.0.0.1:41477
+algod(26750) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41477. Press Ctrl-C to exit
+algod(26758) : No REST API Token found. Generated token: 8439e808f8a9d54e492b0cdd6933db1b7b30a8ade373e7120659df2c4394af16
+algod(26758) : No Admin REST API Token found. Generated token: 0007bda5dbf7a5e3474357bc8b05895d1f082fd7653611dd068000437b8d1a32
+algod(26758) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Node/node.log
+algod(26763) : No REST API Token found. Generated token: 20ed80f6d65be56990c195ace475be18b45cace788c57e06e45871626fce9472
+algod(26763) : No Admin REST API Token found. Generated token: 0b2752e214fbefc5badd32ad0f31867c4c053410aad7b304d59496914b87c562
+algod(26763) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Node/node.log
+algod(26758) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(26758) : Initializing the Algorand node...
+algod(26758) : Success!
+algod(26758) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:32923. Press Ctrl-C to exit
+algod(26758) : ⇨ http server started on 127.0.0.1:32923
+algod(26763) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(26763) : Initializing the Algorand node...
+algod(26763) : Success!
+algod(26763) : ⇨ http server started on 127.0.0.1:37463
+algod(26763) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37463. Press Ctrl-C to exit
+=== CONT TestDeadbeatBid
+ auctionFixture.go:994: found a nonzero auctionID {AuctionKey:RHJM6E4PXICDIRFL6XZWMVSDEOOCXZHOMSCPW33EQNBTZOF3RD2DIJAC2U AuctionID:1}
+time="2021-03-31T22:31:19.888841 +0000" level=error msg="[Stack] goroutine 207 [running]:\nruntime/debug.Stack(0xc000130930, 0xc0000a84f8, 0xc000124690)\n\t/home/travis/.gimme/versions/go1.14.7.linux.amd64/src/runtime/debug/stack.go:24 +0xab\ngithub.com/algorand/go-algorand/logging.logger.Error(0xc000130930, 0xc0000a84f8, 0xc000170ae0, 0x1, 0x1)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/logging/log.go:219 +0x56\ngithub.com/algorand/go-algorand/auction.(*RunningAuction).PlaceBid(0xc00045d2b0, 0x82d637ead87e5090, 0xcb2674832468e080, 0x1ba05b89ed224192, 0x78baeeefb7240cb2, 0x186a0, 0x88, 0x0, 0x3404ba8f13cfd289, 0x23435666f3f5ab44, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/logic.go:334 +0x149b\ngithub.com/algorand/go-algorand/auction.(*SerializedRunningAuction).PlaceBid(0xc000494930, 0x82d637ead87e5090, 0xcb2674832468e080, 0x1ba05b89ed224192, 0x78baeeefb7240cb2, 0x186a0, 0x88, 0x0, 0x3404ba8f13cfd289, 0x23435666f3f5ab44, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/serializedLogic.go:82 +0x125\ngithub.com/algorand/go-algorand/auction.(*Tracker).placeBid(0xc000089d10, 0x82d637ead87e5090, 0xcb2674832468e080, 0x1ba05b89ed224192, 0x78baeeefb7240cb2, 0x186a0, 0x88, 0x0, 0x3404ba8f13cfd289, 0x23435666f3f5ab44, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/tracker.go:224 +0x326\ngithub.com/algorand/go-algorand/auction.(*Tracker).ProcessMessage(0xc000089d10, 0xc0001c3630, 0x3, 0xc00002f540, 0x34, 0xc00002f580, 0x3a, 0x3e8, 0xf, 0x3f7, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/tracker.go:167 +0xc46\ngithub.com/algorand/go-algorand/auction.(*Tracker).LiveUpdateWithContext(0xc000089d10, 0x1a50320, 0xc000084540, 0xc00027da20, 0x16fea81, 0x4, 0x0, 0x0, 0x0, 0xc0003136a0, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/tracker.go:311 +0x1ce\ncreated by 
github.com/algorand/go-algorand/test/framework/fixtures.(*AuctionFixture).GetAuctionTracker\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/auctionFixture.go:415 +0x3d6\n" file=logic.go function="github.com/algorand/go-algorand/auction.(*RunningAuction).PlaceBid" line=334
+time="2021-03-31T22:31:19.893756 +0000" level=error msg="the amount of bid currency 100000 exceeds the deposited amount 10000, dropping message" file=logic.go function="github.com/algorand/go-algorand/auction.(*RunningAuction).PlaceBid" line=334
+time="2021-03-31T22:31:19.893986 +0000" level=warning msg="Placing bid failed, dropping message, err: the amount of bid currency 100000 exceeds the deposited amount 10000, dropping message" file=tracker.go function="github.com/algorand/go-algorand/auction.(*Tracker).ProcessMessage" line=168
+=== CONT TestStartAndEndAuctionTenUsersTenBidsEach
+ auctionFixture.go:994: found a nonzero auctionID {AuctionKey:3KGU5ICONSLT4V2WD4QY3CEKOFLN7DIJC7UKJAH2RHIPXWAZOYMXONMQUQ AuctionID:1}
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 15
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CGB7TUNMBWIPUNAOBKGKIK3O6TSUBCEV7SMML2WZW25MENCXFY2Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 15
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id NU2G3SKFNG6T7FDNG44JD6ARGU6OBJPGRMGNG5KBP4VGON24KBPA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 15
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id UD4Z443BP2EZ6ERC5U47YGMDQJD5ADNXOKKJ2UPDRM6OXOG5TJLQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id LDTYDFSKGGFKKCQCVYQCMZW3GON2A3LRUK5G7NIK7N3R5HH7F5SQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id WYT4QUV7KXUCNFR3CE4AY7STC6FXMHVJUHHWLXUBKEYPNF3LNBXQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 65GCUE6VI5GDOZMUKE3QK2PV6XYKKAZAC6FQ64K66DBQSSL3N6BA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RXP4ZBP4JST7S6F2F2FKTMQ4TXUXRPUSP5BZNVZYLP4B6VXPQI4Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 7EWNH4D25CGJWU6QLL3F5ALEQ5Z4ZP4R4YPNRHKZRRARSEZJZVAA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id QU5ILT2U4LIXONDARZQ4J3EV2L5FHWWWNCVJVNMTRHJP4UW6AEOA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PWIKAVTNVUFDLAJKTF34BIRD5VZDJ5IQKAC7BKCTGFCQ4XGM7W7A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id AKCY4RQ2PTOVCMNSB4IJ6RTXEKQDLFAHBIA6JYGRXP7D3Z6H7YOA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id QWXPLTSGGSHKXMRX3SYLLAZUNOKUT5VYI7GG3SZ67UDH7IKTHFDA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id LP673HDNDP2AJAXM4ASOMWK4I4X7CLNSUR6KW4OSWEUEGHMWKOHA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id YRR53AJYGRWKTA4UGIBZRQVWDJ2YHI7MTFOGXDLI2RAIWSGVGDPQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 5IMV4JLCADFHB2FLS7S4COZRS5NXZIGNRDR5A5S6PREWYGFCATTA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id OYD6IYRWNHGR7PUZILJMBD7AM7SKFL33IER5FV4IGTGSPG2CFK5Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id DK65RHFW3VA373QNGK3FS3ZFWZ7XS2B25P52Z3XOYD2AG6VE5ZUQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id UB52J7STY76ZO4NO2TW5FMZOEKUGLT3ZETS56RRPIEGBG75OVKXQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id X644QZAFMREINVUEIANHG26DYQZ7LEH3MTO4WGCFDLV57TBSZCAA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id HQJGQDDB5KB6HGJWTLTBB2ZAJIYKYM4A6Q3CPVONV3X7CD2WOP5Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id T3BFYRKQDLCVWXUC4H345R7W4CWPRDO7Q33J57XFXHI72I3YD3SQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id SUSPQOTYBWDD57O67PHYNJ7ZIKTZAMFMB35PF6LDJNOSOWCSQUFA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 4IG5JEPFUBMZOD6RHLMPQKDAYXWTAQP7GLSOV5UQPXR4VKEBHD6A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id V5LIOKNO6K345DZGJJTWT7LNWMDON5DFIGIFUNW5XZ5QFBMSZHTQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 366F6G54WMJFW6NX5JKUZLMQAHHY7KNKK5KV637WHONMCQI56MUQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ZVUIYLVP4UWXTHCKBRAYMZBKC46Y7DBTQGIRGEHKNIOI4YPCEKHQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CXCTZGQWZCCVLQ4NA4TBDJW2XSGN5CHH44E7XO2FZ4E7GL23C55A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id BY6FOP5LIFYDAPMFQDLXJT5O2IRUZTY5TEVUQI2UEXKYRWPEZVWA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id GBMV2ZE5N5EDBI2E773BB6CFRLJRTMAPYQGPEDMBYI4WEUKF54TA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PEW5FWPBA2ERKQSUIUY73V3C4GE7OB4ZKRW7F6AC5BWKE5FQ2STQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2ZHHYXL3ZLMQKIDJCGGRNIFFXN3O7D33EMFHAWOMVCULWKVNOIQA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id LHSERBR7WD5BXI6RLT65CJ5FT3PXSNDM2RYX5NJYADZ6Q4YVGSIA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id GBSXFR32HVP7EYENSQKMTAKH6TONDRV5ELLPFC2PYX7ZK3I6DWLA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id YDVA45GPEPMBFNGBNDDBX5NFKV2QQK3ENBVDZ5EPRFPSGYOMB3TA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id TEWZ5HFEVJ65Z42CQTLIVISAHKKEF3A5VA7F5K2DKDVEYVTHBM6A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id OZJTB5H7DD7GAHCQLKNM3KZI6V75GVNXIY2UVFGMOVG24TZZEGBQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CYRNDRKS74LGAE444UVPBDDSA6V5CFNGWD775HHKIP2YWGTOYOVQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id U2MSW2Z3QJWAPSZHNO7ZNSKDHSQT45QDSATSHSLDANMD6XQVLZHQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id DJSAIJXZTHPALA6KL3W3S2NYDEIZ2462MZEOLO3KIH6ABJ6NII4Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id S52YCAX4LVMRIWTLDIYYENQDQ4FCVQDP36XK5MKWXFB65RB5FHPA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id DAO35U7DOCK4LO4GA5DUHRL6USDQLQ4UWW4Y7XFFJP4E2Q3AN26A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id HVKFQM5LZMKPKDOLA4QY3W6K3WAMWWL7IS6U2NTU6QOAB2XHIZTA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id L2FSE2EDMDYGN44RZS6JDZCN7ZI6JLJRYX36JN53FGXORLLLVM4A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id O7SSQIF4GGGR4R4TGKVBLASIT2LMDYJCGP7CJSKZMFOLHFN7F32A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id JULDLQLYYV2RBO6DXESQCZE3RQSTZUZLHCOUAQAX5E5QGINKP3RA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2G62PRT5UW2EMMQVJRXAGLWBKMXV4VOCLB4PJAQSZOX4NC4HG5AA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RJ3U3RUFFITOXIXZQH4SR6XTJOB5HMXFOIAJKXXLPU5RYUKBEHJA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id U44JE4RH5LATJNN7CK5Z3VPWUWBFW7LREDL2YGIRII34FWXIL2IQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id BC6MJPVNI6J2MOPM5VHMDBF6QR6BBH45DLGM5KL3WQG32XQU4BCA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 6D4HH2BEFTPCN3WKFSZCOWKHEGMD65M7ETBFGL376ICZWUER53AA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 4CHWXVGXGYSVSPTET342JVGZKGOQTIEJRVBKXYGHEYOVECMJN5IA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id J7AYF27HWS5OM7BWMX43JFYSFPX6FXKPQKJ5AMNR4ZBKZQVMLTOQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ICMNIXYDQPZKMXVA3N4ZVNYC2LL7W2UF36RGWJG4E2KUGJPTNSUQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id BBVLO3LRN4M5TVSKB3MNB2JNK2E5ZMGQB3E23NQNWTHTLTDAWL7A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CEM2RBZSBAFVYMLXUITY7VHCUUJBXLIJEBQ4LJORVA2DT3V5GIEQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id SXQU5J6X6U4IZSZYJSFUZQCBDEMNX6YKLIVSCSYILWLZZPQ4QFQA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id AKASPK4ZUCLDJU7PPONCB2OYUUPB7TYBMJBAF5DIHLSQ36QB6MXA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2AGH7ADWOCAMAIB7MDT5KDDS7JA53NYRI6IMBSCORD4WZ5KDXZPQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id WBDMLAVORJTSVDHGZTDTXNI3GQ2X4GHWQGO3KT4AKZE3OYQF2ESQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id WFV3XUJG337PTWV7XNM3PUPVQOQXVHYAOZY4RXU4PIVLQLG7RXBA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 37TZLJPRSADL6K4V3DLDESUA4MVWAZ7U3XRLDDUUDPPE2GFIFDLA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id JLT7IJAL4SQFDZEZUHFVK3MSLV7RNH2Y3V4FHW3JI2IW5I6SRRUQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id LAKSTB4LZVCJYC7S2YEU2VGCF4BBURPWAYITWQCL3WZ5SXBWZIVQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 7HDMRVY7O3EVIIQ3K6NFFRZXX5PZJSBCS6ESVNEGNUY3PNANLWZA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id GZVNBDVAWHH525CZZCMUW5JJZIGM7BCDXSUEOZOFI2OXSP5LCZPA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id HGPBR35AZHRS7WWXTYU264ULMGYBLMPXX6LHTO2G5425U5BLJZ7A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id Z3HDAIJGLK2QYTO43W2MFOG62CITZKF7AZVXDCCGBYEKC3ZE4EKA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 63TLUX2UAO6Q27B7D4FEDVQUNCIP4AQMOZEOQXL6A7KAID3YSDYQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ONY3H754A7V7G7MUETMXKCM3SXLIAPAF5RMBGMXUSJ2FNLGMSJZA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id E5QWBRBP3DEVNIA4TTFM3BX6ZJOHD2R4SB7HJUBXHLYRNNVBAZ6Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 5PW3OPK3WXEIEBFJYFQB4VSYODY2ZDBYFPY4MU7RHI3BLZHXBCQQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id XGHVKYBOOHYYR3O5LQ5HZYNJMUWCY3HILNWWYBJ4TOQ4DDYRIJAA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 3F76YEBNYVSVXUMOVL5KG64Q7I3FX4L4VWFFTYW4ZHGE4W5YNSKA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 4J32COCVUKGCTFHRXHI2JCXZNQL5L7UJN6V2AIXWX4DHCLLMMWLQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RRPW3LVIYVGCN6NOVC57F3SSEP2YO2HDY6A7JMRCVRXN5FRESL5Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PXGETO7VS6LITF4BUX6I2VZTACV54VNNU7HVVTX7ZXFVNBUYW63A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RQ2YKODV3MSMZIE7EFDVWSH72JTDLJFV72P24DIVJ6LZWXET7NMQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id IHYQ4LHYT2UTJF6QHRTMEST2EEQPDLI533CJEA6T2MUQ2OTPHIYA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id SOAUDXGXXPMD4XDM56MDOZHWKEWPROP45BONZ5IXGJ42OHT5T57A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 62XBOLGXSBITGTRWJ2LSG6EIIQUV544FWNR52RV5UIHLAORAYC7A
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PLSJK2C2FUTFLKQQSO3G3QT3Y4VDDQZK67ZIC4NVKSZKHO3ZCXEA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id H2AB2X4VWXV2AO6VL4LYHPJNCT77LS3VRZV6LTYOZYQY3HU5P7ZA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 54C36BZBPGD5H2MQ6MWZPZJSGEJ2EJWXIQ4MLGZ3MKBJN2MT52XQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 7PUP2AAPT2G644Y6VEI4Q5OGTGJANH7PPWTYBCKXB5LJORLVC66Q
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ROXLZA6LKUTOREI4DQLSESGH77EIEHWFFFDBIR2J3ICFBH6UKQTQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id NRPC2KPHMSRHDZDOYSKH5QAY2JQVMIVNBXF4VHNGZI3SF37M5LCQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 475GRHPZV52BHDGRLIH7ZLMRTUAERUHW7Y2KL3XHOAHE5CGNKJWA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ECIWDHV2QELLN6UREXXMDWZOFGF3JHKTHJ4VTPSPZDGQCJ7B7OHA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id UH66VKFZIFSAP5237TPW5ND5U67LBQR4HUJRJ2FD7PSNNFJTWGUQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id IN3AQ5OHFUEXC6PDV534QJV5C2EC7X6QVDH5O3DNA3HSNYGCFPWQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id H5SGUVB2HWP5BN7N6E27YW5MQWKJ6H476DQFJKIM3PJ7G656FQEQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id TLDHT7EYAIIDJSJK56PCCII6OPRX2EPFSBGQGVP564TARTOJLGXQ
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2CUL4G2T7ERYYC7VPKIAYV4D5ZKPJTNQ7BAL5HLBEBLOLQHIRZAA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PXATHVGK7JNMUUMPTUGM6M6UNYKWUTSNMVT7YWIP52L5X4RWMXDA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id NDMFOWC3I3TBCQQG576DF2ZSIUBLTYSRRQJDLBBCEI7ZLL5XQJSA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CEU2E2VWUAHUGI6343K3U4WFYKY67VZX5CB2LCVBUGRW4D7GTFWA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id JQZCOCSUMG6MXHRJRXTZAU7RQCQRC6UDHEQCXU7APKUBYKMSCLGA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RXS5GZITGCS3B3YEJPZDPD6HENWSJXT7GQFS6ZUTFYMIZLXIZ6XA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2BCUY4DGZM2PGBZZ7NJ4X64V3P5ED7ZUSAW7GTHBT2FI5Z4APCKA
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17
+ basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id C4YIVO653QEPI3T4KHUZRK5BAOABIUO7NSNWCA2JMBEMLM7N5ZBA
+algod(26758) : Exiting on terminated
+algod(26744) : Exiting on terminated
+=== CONT TestDecayingPrice
+--- PASS: TestDeadbeatBid (142.06s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(28489) : No REST API Token found. Generated token: 65802ac34482efb6c1a8a71eed19b406fcb9573b3648ca84b4e25b25525b3a9e
+algod(28489) : No Admin REST API Token found. Generated token: 7c3d27f385e1ae2395d7206931a5b586bdd8e08c4750bb6584d6f40bbbcb6d25
+algod(28489) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Primary/node.log
+algod(28489) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(28489) : Initializing the Algorand node...
+algod(28489) : Success!
+algod(28489) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38547. Press Ctrl-C to exit
+algod(28489) : ⇨ http server started on 127.0.0.1:38547
+algod(28497) : No REST API Token found. Generated token: 20e5cd50647585f93dced7555bd8f635daa7c61ba00c1cd465e9f68ff62be7d9
+algod(28497) : No Admin REST API Token found. Generated token: b406afd1c57c77fe72b9cba3c8b69a3d786047bce8f450edb4b67334b92bdcf9
+algod(28497) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Node/node.log
+algod(28497) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(28497) : Initializing the Algorand node...
+algod(28497) : Success!
+algod(28497) : ⇨ http server started on 127.0.0.1:39333
+algod(28497) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39333. Press Ctrl-C to exit
+algod(26750) : Exiting on terminated
+algod(26763) : Exiting on terminated
+=== CONT TestStartAndCancelAuctionEarlyOneUserTenBids
+--- PASS: TestStartAndEndAuctionTenUsersTenBidsEach (168.01s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(28653) : No REST API Token found. Generated token: 5848a5c5a29c04aa478a9df0294e82299b5d21195f5e575a49b4dd0ac9b7bda6
+algod(28653) : No Admin REST API Token found. Generated token: 1c1fac49d071271fdbbe51a79b0d191ef6ad526c1018b70fe447cb5fbb940c1c
+algod(28653) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Primary/node.log
+algod(28653) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(28653) : Initializing the Algorand node...
+algod(28653) : Success!
+algod(28653) : ⇨ http server started on 127.0.0.1:8080
+algod(28653) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(28660) : No REST API Token found. Generated token: 6bf7fc768944d2aa31a3bab04c5fdd9f98724afc07e25ea1c9f48ce3cad79ea0
+algod(28660) : No Admin REST API Token found. Generated token: 6dfb49cdd467ce5d191f8055e5a8fe7096919d294f20d3bb41e3bd9701b47cc1
+algod(28660) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Node/node.log
+algod(28660) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(28660) : Initializing the Algorand node...
+algod(28660) : Success!
+algod(28660) : ⇨ http server started on 127.0.0.1:33663
+algod(28660) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33663. Press Ctrl-C to exit
+=== CONT TestDecayingPrice
+ auctionFixture.go:994: found a nonzero auctionID {AuctionKey:ZWCTP2VWOXCNIVI6WGL2GKRPNNOQBGW27RE2ELYW4KH2Y76DV4AMWKHQ34 AuctionID:1}
+=== CONT TestStartAndCancelAuctionEarlyOneUserTenBids
+ auctionFixture.go:994: found a nonzero auctionID {AuctionKey:KABBWCKKTSN7GVUKXWZLQRJ5UK4KK73K73XQK4B3UBHTRHO2Z5QOQZJYFY AuctionID:1}
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12
+=== CONT TestDecayingPrice
+ auctionFixture.go:388: warning, at.LastAuctionID() resulted in error: no auction has been seen yet
+algod(28497) : Exiting on terminated
+algod(28489) : Exiting on terminated
+=== CONT TestStartAndEndAuctionTenUsersOneBidEach
+--- PASS: TestDecayingPrice (108.82s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(29377) : No REST API Token found. Generated token: 2d1fca81c5fa6da76be3047986e88f3bfbacd70105e690590955717a694690d1
+algod(29377) : No Admin REST API Token found. Generated token: d07983f9682031626c657ad6b927863f27286b217d83dff1c0997b3cea122a91
+algod(29377) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Primary/node.log
+algod(29377) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(29377) : Initializing the Algorand node...
+algod(29377) : Success!
+algod(29377) : ⇨ http server started on 127.0.0.1:42947
+algod(29377) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42947. Press Ctrl-C to exit
+algod(29384) : No REST API Token found. Generated token: 3765fce59e5025f00f67051bf921ae19516c1b33c94c0e66fb90f1c26a4dfbe8
+algod(29384) : No Admin REST API Token found. Generated token: b41220cb5d6624098050072c54f687b15849f6af343d4cb88b90dac4bc78ad68
+algod(29384) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Node/node.log
+algod(29384) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(29384) : Initializing the Algorand node...
+algod(29384) : Success!
+algod(29384) : ⇨ http server started on 127.0.0.1:37789
+algod(29384) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37789. Press Ctrl-C to exit
+algod(28653) : Exiting on terminated
+algod(28660) : Exiting on terminated
+=== CONT TestStartAndCancelAuctionOneUserTenBids
+--- PASS: TestStartAndCancelAuctionEarlyOneUserTenBids (91.91s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(29452) : No REST API Token found. Generated token: d1ef602ac47b3e6c5ada5a2154f1adaa27dd54df8fc0a365ae933951819d4043
+algod(29452) : No Admin REST API Token found. Generated token: c1d845641585dffee2bec94dd1825aaf5e456797859f870e3e907b05c6503b56
+algod(29452) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Primary/node.log
+algod(29452) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(29452) : Initializing the Algorand node...
+algod(29452) : Success!
+algod(29452) : ⇨ http server started on 127.0.0.1:8080
+algod(29452) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(29483) : No REST API Token found. Generated token: 1da6a05ae355f2c698aed17e2caf8f0a56da9bf9274f517e5674ad50a56e0b2c
+algod(29483) : No Admin REST API Token found. Generated token: 12dac1b982712da265a272a5d6e5c1d0356af055ecbbde7a6aec0d671e0353f6
+algod(29483) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Node/node.log
+algod(29483) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(29483) : Initializing the Algorand node...
+algod(29483) : Success!
+algod(29483) : ⇨ http server started on 127.0.0.1:33379
+algod(29483) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33379. Press Ctrl-C to exit
+=== CONT TestStartAndEndAuctionTenUsersOneBidEach
+ auctionFixture.go:994: found a nonzero auctionID {AuctionKey:KLZIKVEOD55R3YN2XECKECQHFF5VLQASW5WHHJQYQGEWPWICFMHWUQXMKM AuctionID:1}
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14
+=== CONT TestStartAndCancelAuctionOneUserTenBids
+ auctionFixture.go:994: found a nonzero auctionID {AuctionKey:PWHI3UIPO5VNU7YBCYTOV7JA37RDAQH7SXMSMEG6CJ43FG5BDOKCXQ7RSE AuctionID:1}
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+ auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16
+algod(29452) : Exiting on terminated
+algod(29483) : Exiting on terminated
+--- PASS: TestStartAndCancelAuctionOneUserTenBids (135.28s)
+algod(29377) : Exiting on terminated
+algod(29384) : Exiting on terminated
+--- PASS: TestStartAndEndAuctionTenUsersOneBidEach (155.63s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/auction 406.589s
+=== RUN TestBasicCatchup
+ basicCatchup_test.go:35:
+--- SKIP: TestBasicCatchup (0.00s)
+=== RUN TestCatchupOverGossip
+=== PAUSE TestCatchupOverGossip
+=== RUN TestStoppedCatchupOnUnsupported
+ basicCatchup_test.go:198:
+--- SKIP: TestStoppedCatchupOnUnsupported (0.00s)
+=== RUN TestBasicCatchpointCatchup
+ catchpointCatchup_test.go:83:
+--- SKIP: TestBasicCatchpointCatchup (0.00s)
+=== CONT TestCatchupOverGossip
+ basicCatchup_test.go:100:
+--- SKIP: TestCatchupOverGossip (0.00s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/catchup 0.064s
+=== RUN TestCompactCerts
+=== PAUSE TestCompactCerts
+=== CONT TestCompactCerts
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet0.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet3.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet1.0.3000000.partkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet4.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet3.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet0.0.3000000.partkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet5.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet6.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet2.0.3000000.partkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet7.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet4.0.3000000.partkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet8.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet5.0.3000000.partkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet9.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet6.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet7.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet8.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet9.0.3000000.partkey
+test-fast-compactcert 100000
+algod(30825) : No REST API Token found. Generated token: ba77c334538ad9c8efcabcc4219c3eb8aa72eaf8a01dd3017d3da24e5b2c57c3
+algod(30825) : No Admin REST API Token found. Generated token: 76c72b69eae44bc7b2e837355b010323a7c3a558a3499e1f4cb61b23b6098431
+algod(30825) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Relay0/node.log
+algod(30825) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30825) : Initializing the Algorand node...
+algod(30825) : Success!
+algod(30825) : ⇨ http server started on 127.0.0.1:8080
+algod(30825) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(30833) : No REST API Token found. Generated token: 9d19b37ef7dc396ecef42ffc95a745f2028c41edcdae2179dd1c861281e7a4ee
+algod(30833) : No Admin REST API Token found. Generated token: 79113ae59d82f616e00348a996ee32032eaa6b3fcfb3cdbfcc506b648ea9e10c
+algod(30833) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Relay1/node.log
+algod(30833) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30833) : Initializing the Algorand node...
+algod(30833) : Success!
+algod(30833) : ⇨ http server started on 127.0.0.1:42427
+algod(30833) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42427. Press Ctrl-C to exit
+algod(30840) : No REST API Token found. Generated token: 1343191004a68d29865f648c9e96a7094f43b7634befd54d8e18c0072958bf77
+algod(30840) : No Admin REST API Token found. Generated token: ce874573e75eb4a1ee711babac98821b5c9fd8bfd1c8e60ea64fa112b071d3bb
+algod(30840) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node6/node.log
+algod(30840) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30840) : Initializing the Algorand node...
+algod(30840) : Success!
+algod(30840) : ⇨ http server started on 127.0.0.1:43771
+algod(30840) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43771. Press Ctrl-C to exit
+algod(30849) : No REST API Token found. Generated token: 0626b8160608e3e40642e7757889b4301c64c16b4eb789373cb356f414380579
+algod(30849) : No Admin REST API Token found. Generated token: 4f8c251dc96c4dd7d1fc9c09dca67034f1111bab420f852665b8d934074a892c
+algod(30849) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node7/node.log
+algod(30849) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30849) : Initializing the Algorand node...
+algod(30849) : Success!
+algod(30849) : ⇨ http server started on 127.0.0.1:35167
+algod(30849) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35167. Press Ctrl-C to exit
+algod(30856) : No REST API Token found. Generated token: f065a2be901f2a9bde7918a92826ce00d58a75a88cdf57bb23ec719f72ab587d
+algod(30856) : No Admin REST API Token found. Generated token: 75f4f0712e34eb97373dd3106b8e1073133ece2b8c64d91032c8e487bfad17c8
+algod(30856) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node9/node.log
+algod(30856) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30856) : Initializing the Algorand node...
+algod(30856) : Success!
+algod(30856) : ⇨ http server started on 127.0.0.1:35443
+algod(30856) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35443. Press Ctrl-C to exit
+algod(30867) : No REST API Token found. Generated token: b9d387405daa5eadd532cff740de842d251578c95e1bcb3f92410cafb10c463c
+algod(30867) : No Admin REST API Token found. Generated token: 70eb1d6892b0ca5bae1f8415be9f8f83eb2b93998c363a73dc44c8a45f9db0b7
+algod(30867) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node0/node.log
+algod(30867) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30867) : Initializing the Algorand node...
+algod(30867) : Success!
+algod(30867) : ⇨ http server started on 127.0.0.1:35497
+algod(30867) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35497. Press Ctrl-C to exit
+algod(30879) : No REST API Token found. Generated token: 7adfa0dca3dc5a1aec7b78086f426ce943aa0d333a4413b11affd47c17ad1638
+algod(30879) : No Admin REST API Token found. Generated token: ae06ec29916724301b9ef86be6c33506450b3d6281a5475b51c17dc37122178a
+algod(30879) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node1/node.log
+algod(30879) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30879) : Initializing the Algorand node...
+algod(30879) : Success!
+algod(30879) : ⇨ http server started on 127.0.0.1:46197
+algod(30879) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46197. Press Ctrl-C to exit
+algod(30903) : No REST API Token found. Generated token: ee4042840c681ea8a264e69eb86318fdbaa4588a91b9245f7d08e12e08259b6d
+algod(30903) : No Admin REST API Token found. Generated token: c8753ee4ec25611c36b409dec13b7bbabe0ef758fe9d43301fc18c69d3bac00d
+algod(30903) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node2/node.log
+algod(30903) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30903) : Initializing the Algorand node...
+algod(30903) : Success!
+algod(30903) : ⇨ http server started on 127.0.0.1:45239
+algod(30903) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45239. Press Ctrl-C to exit
+algod(30913) : No REST API Token found. Generated token: 8d5dfa5f3d9bdfc4e45c2cb45ab402c93d58316342c86ca9125ede633ed73783
+algod(30913) : No Admin REST API Token found. Generated token: 502f369399d6e2e3ccfb5eb890341c55956f8454a50309b7c8fcb13e2ab1264b
+algod(30913) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node3/node.log
+algod(30913) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30913) : Initializing the Algorand node...
+algod(30913) : Success!
+algod(30913) : ⇨ http server started on 127.0.0.1:44425
+algod(30913) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44425. Press Ctrl-C to exit
+algod(30921) : No REST API Token found. Generated token: c4f6c0ed538352858dfcf336663ec2a62f0ee72ffbf30014f1692436039f8932
+algod(30921) : No Admin REST API Token found. Generated token: 61a67e0777aec9e91d4df76169a4545866810bc80600e904ce5ae70895471dcd
+algod(30921) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node4/node.log
+algod(30921) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30921) : Initializing the Algorand node...
+algod(30921) : Success!
+algod(30921) : ⇨ http server started on 127.0.0.1:34597
+algod(30921) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34597. Press Ctrl-C to exit
+algod(30930) : No REST API Token found. Generated token: 957fd73808a15128bf7f43ccb8c67502c732ebe0cd9a4d726f8eaa50eb2066e3
+algod(30930) : No Admin REST API Token found. Generated token: 14a1f3e64476669e90f64162554965dc5cc6b44c5a5b16d6e1fa43af81d96216
+algod(30930) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node5/node.log
+algod(30930) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30930) : Initializing the Algorand node...
+algod(30930) : Success!
+algod(30930) : ⇨ http server started on 127.0.0.1:38789
+algod(30930) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38789. Press Ctrl-C to exit
+algod(30941) : No REST API Token found. Generated token: 0377f157f49c15136a97bbd439e499b36c6cfca945dfdc706d41c0d06ed74155
+algod(30941) : No Admin REST API Token found. Generated token: 022ac82c54333541141ea25526e319c03220939483305aa08e61bb58c00c1d98
+algod(30941) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node8/node.log
+algod(30941) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(30941) : Initializing the Algorand node...
+algod(30941) : Success!
+algod(30941) : ⇨ http server started on 127.0.0.1:35783
+algod(30941) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35783. Press Ctrl-C to exit
+ compactcert_test.go:89: Round 1, block {MXLPJWL5W5AEGA5NSZTEYOGLRPKZNOTTNBZ4XGAVVLTO2OQVLYEA ZVHVCLH63QH2JF3WMYD7UJLIIKH42TTMY55WF7IN36LBPDHM5MSA ZAQHQIY63IW2SEYXW3AA3Q6FEX5ZFP6TYATKGLGONRDFLU5NH6TA VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 1 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 249999999 {[]} 1617230231 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 2, block {WNFU3PX6AVYJNT2LF3QQ62FYIGX6D42S5N2FPROWELTKROFTK5FQ MXLPJWL5W5AEGA5NSZTEYOGLRPKZNOTTNBZ4XGAVVLTO2OQVLYEA ISRR6S6HALLUCL4GLBJL2N7A57JRJJ6DBFTEA56W2XOQIYZ4BLYQ VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 2 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 499999998 {[]} 1617230256 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 3, block {WJU7K23P6IIPL4HIKHP35CVOVUUX2C3BNJAGT5ORMSUYHX5I2HTA WNFU3PX6AVYJNT2LF3QQ62FYIGX6D42S5N2FPROWELTKROFTK5FQ OK4UTYM4C2ZWI7RS27G3YXXSERZWG3ZRXQ6VUKVQEWIIVV625TMQ VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 3 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 749999997 {[]} 1617230281 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 4, block {RYG5TI7X5UEUDSFNFEXY5PMWK7RUZJLOYLF3QG5WAOCJRQEZVIAQ WJU7K23P6IIPL4HIKHP35CVOVUUX2C3BNJAGT5ORMSUYHX5I2HTA IWD3FBOHBTKZVDHWVB75U25C3NURDDVNYYYT3BLQPZXTOIW7EXKQ VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 4 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 999999996 {[]} 1617230290 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 5, block {4A32O4MR3CF2NLTRIJTS2HBQPM3WL4BCM3NSI7RFUPWV6DOLN3OA RYG5TI7X5UEUDSFNFEXY5PMWK7RUZJLOYLF3QG5WAOCJRQEZVIAQ NTH26GJB3YLVHU3S2UJWMDCMELBFMTAYKJBCUIZMEUQVTIOUKM5A VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 5 0 AYM5ZM6BDR22ARY5XCOOIOSH45OE2RDHU4BUBO56C2MAOACZHMGQ 0 249999999 1249999995 {[{pay JES7FZRVNLJJ4MEQXFDWWAAD77STXUO7TQLKL6W3NJRSFOS5NT2A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 <nil> 0xc00049ae80 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay CHOBLMAPA7R6NLPHSIIYQFNBYGW2FT44VVOPBRB2WKTBWVKSP56A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 <nil> 0xc00049aec0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay 5M2WGIRDJLCDEKPZO3QRBZCRUOKBZDGOXSIYA65LCVXGGL4GH75Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 <nil> 0xc00049af00 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay SIME6DZXLPLAGEPHNEMUSLLTTWSDKJR5YXHN4HYJCPALT4H4UD2Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 <nil> 0xc00049af40 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230296 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 6, block {AAKLNUV2PV5RUBHTSYGZ2ZDEKSNPHV52YEM3E5WGVF7L7J4SJFIQ 4A32O4MR3CF2NLTRIJTS2HBQPM3WL4BCM3NSI7RFUPWV6DOLN3OA QK6LYQ22JQYETNNJDO6TOL3JBID4XBQWHH3MXK2EPTO3OFJZIZ2Q VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 6 0 SEI6KQQT3J2I7GZMSH356OJ2MMWP32FPTUPNWJSSHXNZELAV4MHA 0 249999999 1499999994 {[{pay V7QLLCZF7IC2R2VBL6OLDR3DTZT47OECW6NULGAHTCERU3DYRFRQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 5 1005 [] [] 6 <nil> 0xc00049b980 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230300 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 7, block {UFQ4Z7N3WRW7RFKSQA2H3NMN3OGZT523JBSJDQIGNXKWMON6OLDA AAKLNUV2PV5RUBHTSYGZ2ZDEKSNPHV52YEM3E5WGVF7L7J4SJFIQ FCNZQYRZY3WNWZB3P4PIZGHPXZLDQBXGHMS3AEBX63FZNAUK47PA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 7 0 HEW5FKO6HXF6467OXUVHKAHAEVOSLSBR4O422MCE5F4SU2KQCZZA 0 249999999 1749999993 {[{pay FCXI56BQ7YMAQHPKW63OO5CO5XRC6ZHYSNP3V5LIGWFZSJ45J2UQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 6 1006 [] [] 7 <nil> 0xc00049a280 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230306 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 8, block {7DUHCPHES25PPKLZPZRNQOLZN2NGLIP7XLWU4G3JPHB6JCJDCYBA UFQ4Z7N3WRW7RFKSQA2H3NMN3OGZT523JBSJDQIGNXKWMON6OLDA LSF7256QX47D36LIZE7KU2YQOJWZIPBNEB4JVCTRVPHPGYHY2F4A 3FIFNWTDOA5N623OIIXWXC4BIVJYKBVHFKHXW5TATVUZX3GZORF7RQOFU4 8 0 4XO3PFWPBCR236PLO3I4XA77L3MCT73VLMCZI37KNDBCS7XKNA7Q 0 249999999 1999999992 {[{pay OBFAC2TKRXOGBS7DSJZ2QRLLIKQSIWXR73KG2JXC7SIY2BOGZLWA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 7 1007 [] [] 8 <nil> 0xc00007d9c0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230311 {test-fast-compactcert 0 0 0} { false} [99 84 170 186 131 3 195 113 174 221 228 139 49 25 237 38 96 176 139 154 221 6 176 161 11 132 51 203 87 127 67 206] 9999999999995000 16}
+ compactcert_test.go:89: Round 9, block {5XOBIAP23VRPLDOJKUGSWVNURZACD6YQUTL5H2MJG5JOHZNH3NTQ 7DUHCPHES25PPKLZPZRNQOLZN2NGLIP7XLWU4G3JPHB6JCJDCYBA VLMHCL6I2AD4ORUXENMHQ7MIPG7GDPV75PPJKZZ6TRFJSBWTLUHQ ICN7RP7DZRYOCDOJGVOT3TX6OPUH5SIOQGAOIJLW2C7E3QAUSTJ5IMNXLQ 9 0 27AFM5JRYOF5HJZLCWFVLG3HV5QBESW2BWFXXXFCWQOTXS3DEMFQ 0 249999999 2249999991 {[{pay PMFXZQDSNUKP35FIEUNDPSBKRATJX3A2GBIHWK7RCGNFLMZR7LOA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 8 1008 [] [] 9 <nil> 0xc0002c3680 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230317 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 10, block {5FJVHTEGC4EBUUCZ7VHFEITB3TD4CKEKYCW4FZSNPQOPD3E5TR2Q 5XOBIAP23VRPLDOJKUGSWVNURZACD6YQUTL5H2MJG5JOHZNH3NTQ G3NY65OEN5ON5AKGK6YYC7CF5BD57F4P2ZRTON4B5YTMFQNRDGSA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 10 0 6MUHLAXEVY273M6NMHCPXICHN4O62OXGH5Z4BROVIDCMO7YPPV4Q 0 249999999 2499999990 {[{pay 5LCRQAULQEZW5ZMVR7FAGXTEJBTT5WOWAZMCMAH5RJZTSXKWW2OA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 9 1009 [] [] 10 <nil> 0xc00064cd40 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230322 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 11, block {FDHAUUXMKTBT3CEM6CFTE7OQYT6U4XNJCEF343A5FJIXGEE76N7Q 5FJVHTEGC4EBUUCZ7VHFEITB3TD4CKEKYCW4FZSNPQOPD3E5TR2Q J7NY6DTVA27BB2XPXAQ34M7IDAVVNUYVSEWZXS6AM2U2C5XQEKHQ VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 11 0 BARLVCIDPI4RJKWXDJVZTE3UR35UILN4W6AA4QMUDPHEH747J6OA 0 249999999 2749999989 {[{pay TKLLOE6SQHBZMAKODFEMOLLRUD6QCOTISUVJI7M7VNEJYCVW4WYA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 10 1010 [] [] 11 <nil> 0xc0006d6740 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230328 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 12, block {CYSTAL4LEEJNFIM7QPDIYNYFAPZHEVM4HAUDPQMEMASJMBCGVCUQ FDHAUUXMKTBT3CEM6CFTE7OQYT6U4XNJCEF343A5FJIXGEE76N7Q Z2G6TQ2NIJULJ7ABGI4V4QBPHPHQEKDXP65MXBOY54UQPN4QFNNA XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 12 0 LZSY3CTEWWXE34MNLLW2VZM5IRR4UESP4VKICRDLNIHFVHRRB2KQ 0 249999999 2999999988 {[{pay LM4PZOZGJYSSFOL3RQA75YVVJWE75PJ4LB73ZX222PN7SBLH6LVQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 11 1011 [] [] 12 <nil> 0xc0006d72c0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230333 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 13, block {GLOTODLLX53NLOCFNBIOCCSK36SHC3Z7SEHR4RBYUUAGK6EVLOZQ CYSTAL4LEEJNFIM7QPDIYNYFAPZHEVM4HAUDPQMEMASJMBCGVCUQ WYEC5YEP5MPSIOA5IXPAVC7IYZHBTMQTJ3X25O3CBFIZW4VEDRTA ICN7RP7DZRYOCDOJGVOT3TX6OPUH5SIOQGAOIJLW2C7E3QAUSTJ5IMNXLQ 13 0 GOBZNZXR4VFPSLV4LES2GP3IUBTM442D2YNXO6UKLLJUZTPSYPBQ 0 249999999 3249999987 {[{pay A72RL6XPRXHHTSQ6KY32Q3DJRPPQP6XXI7ATDQXVVVP6SY4X3ISQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 12 1012 [] [] 13 <nil> 0xc0006730c0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230338 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 14, block {XFT47WFAQM7AA4FLMLOI3DBUIFXXVNPGELKPUKYAZ7CTBJD4URDQ GLOTODLLX53NLOCFNBIOCCSK36SHC3Z7SEHR4RBYUUAGK6EVLOZQ FANFJ364DNE2WNI3XTK2AZCDRXW3VMFXJKEE6S6HUAFD2X2KGJ5Q 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 14 0 5K6DJPBPWDS2EE2AEHWW3PLRS2JYLCXY2U5U7X4IRQBY4F4U7UHA 0 249999999 3499999986 {[{pay S7G3NIUF5YHW5ULBDCCJXN455JX3Y2NIZCCZU2B23HSUWKHGFBHA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 12 1012 [] [] 14 <nil> 0xc0001e2d00 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230344 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 15, block {GYJIWG3ADCW2TFSCGP5WEWPWZASUBKLAOPXI6ORQOSJB5T36DWQQ XFT47WFAQM7AA4FLMLOI3DBUIFXXVNPGELKPUKYAZ7CTBJD4URDQ P2AKQX6DJRF54UATEX472XEVPVVOEE6IKIEUTJK37QSYGUQO4XCA QE2T4IZYSGIR4O4W4NGACYFGZCODIPQFYIIK4YYFEI6C4ANM2CWPC4XL44 15 0 ZK4BYLTALCT2ZSWE4KU6464DZBHCNQ3DC5EV26VG72HYKLW2X62A 0 249999999 3749999985 {[{pay OBDLLDIULZMY62KHMMB2FNQ3UMH652MYGTLVXBVH2B6U6BVOYT6A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 14 1014 [] [] 15 <nil> 0xc0002c3880 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230351 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 16, block {MCXJ4P7V2E7E565VMW6YAWOS5AGQHON37FPITKJWSAMYMAJCN7EA GYJIWG3ADCW2TFSCGP5WEWPWZASUBKLAOPXI6ORQOSJB5T36DWQQ R6VKNMF6UQLJGQN336I3YZE5FD6R3G4ZWD7VOXHDWWTHWTONSCDA XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 16 0 PD7ZDZECJ2EQSOF6IQ6ROMA32XIFIF2QBJXYNJVURZLEYZ7RZUTA 0 249999999 3999999984 {[{pay 2X4CW6EUOQHOPSUWM4EUSTQFO3WSY2ZETG6XP6Y2JCLSHFGSPJ6Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 15 1015 [] [] 16 <nil> 0xc00007dbc0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230355 {test-fast-compactcert 0 0 0} { false} [187 97 223 219 164 82 107 123 150 229 176 156 112 139 64 234 138 1 57 145 216 105 218 99 156 83 120 159 42 238 87 122] 9999999999987000 16}
+ compactcert_test.go:89: Round 17, block {5PUZBMGQZPNJ5RVQ3MACDWSCKOE7XNDLMC7MIUSXZGWBYYCDDLJA MCXJ4P7V2E7E565VMW6YAWOS5AGQHON37FPITKJWSAMYMAJCN7EA 6GIHPG3FYNPAMFTOPJEC6O6LBDWNFSEHTGQAR2JLAKNZ2YNJKVIQ VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 17 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 4249999983 {[]} 1617230361 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 18, block {7RXA4EDJHKNU22MPAZV5OGUAEFKKKAUFZSY2BB2WG5PDCAOHFA5Q 5PUZBMGQZPNJ5RVQ3MACDWSCKOE7XNDLMC7MIUSXZGWBYYCDDLJA G7SLHCTDR2HLPELSISEKFYVPD4ONUWNM4ZRQ35HKCYHBTAWVJ25A FBCT3CKJXTBBBOGCNPCONOOXJUFWSMK5SLSE3DJ7LQC2T3BTYBEGZDNE34 18 0 5IRNKVYTFSSSHQ62HPABRMNZTRCYSBFKY7LOJZ626BXRMZPD3T3Q 0 249999999 4499999982 {[{pay OW4FFKP6L5ZUP444RJUCBU3S26H46TI6DIF7Z4QYABDOZL4ZQ6VA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 16 1016 [] [] 18 <nil> 0xc00043a000 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay EZFQQF2XHNLWIQOVX72EGK2CQXJ5TYGRQKS2V6546SIFNOPWFJWQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 17 1017 [] [] 18 <nil> 0xc00043a040 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230366 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 19, block {A6AKZ74CYSKI6M6UP64XF6I6I2HDGPTCBG5ZOQYRRNSHJYRUQJDQ 7RXA4EDJHKNU22MPAZV5OGUAEFKKKAUFZSY2BB2WG5PDCAOHFA5Q MWKB6ULUA2NIJ3FJAAOQOB6DP7LWYFS5SZMV3ZC3KQ2DPL52YOAQ XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 19 0 AT3DUAATCMKVZMACHXKH5CZ4CGPMYRQFWVPXRJFGJ6LGGCO7P6OA 0 249999999 4749999981 {[{pay QBEC3DC2SXUPG2S4N3Z2G3S3BO6EYQGOJ7F2MI3OOMCU6I24XEMA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 18 1018 [] [] 19 <nil> 0xc00043abc0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230372 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 20, block {GW3D4YG62WNSL2OP7XFZDYRUZAGHJMZALO2TE4Q7FSLUIDKRS6IA A6AKZ74CYSKI6M6UP64XF6I6I2HDGPTCBG5ZOQYRRNSHJYRUQJDQ PVQFIKHXTR46F4JPCQYO4VIKIKYAOIUGZHGWYGP7Q2I54OIDHBBA XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 20 0 N24A7YZLIU36DEMVNDTSXSZMJPHZCM664OY2HRQU4UIVUU4KURHA 0 249999999 4999999980 {[{pay XLGJBEBYMYOFBG3VVEJORLNWLKLTEDBW5WNGCFLXKCTGE66LZL6Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 19 1019 [] [] 20 <nil> 0xc0004b0ac0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay W7W6CBRUC55LX2M6U2VCKQ3VQSFNXWRZTQ4LC2TINLXO5DUOME6A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 19 1019 [] [] 20 <nil> 0xc0004b0b00 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230378 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 21, block {D4S5MR343YI6Z25GCSHDA64ZX4WGEFVQNGMM3QC3VQERJ3H4F5NA GW3D4YG62WNSL2OP7XFZDYRUZAGHJMZALO2TE4Q7FSLUIDKRS6IA 4B6QNIMPSIMMHSZ3MKWDQUENGJV4VHHGO3UZ4ZCNX6X7V4S3H5WA QE2T4IZYSGIR4O4W4NGACYFGZCODIPQFYIIK4YYFEI6C4ANM2CWPC4XL44 21 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 5249999979 {[]} 1617230383 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 22, block {JOP24AFMKGFB3UQHTFXVGDH7HFOQUEYFCPI23EDPU4LOZHX52YVQ D4S5MR343YI6Z25GCSHDA64ZX4WGEFVQNGMM3QC3VQERJ3H4F5NA JW2NUTCFO4VDL3OZCFLUT64WPJBEF22V42ECAQTBC2U75QAYHMSQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 22 0 RMRCTE3I7VAWFN22V6QAXOYRTKGBXDPGF2KRLUZKE6KXNURSNIYA 0 249999999 5499999978 {[{pay X7VCWHLJ3DHRW7VZNRD2HOYNVO6TJ3GL6CSG7BJC36NC2OKZFUVA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 20 1020 [] [] 22 <nil> 0xc00049a840 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230388 {test-fast-compactcert 0 0 0} { false} [] 0 16}
+ compactcert_test.go:89: Round 23, block {T3LFHX7XW2NSWGTWSL45YGRSSU52WRU5GM2CZYDNAS4J6M5SDCZQ JOP24AFMKGFB3UQHTFXVGDH7HFOQUEYFCPI23EDPU4LOZHX52YVQ ACKOSC4HML4CXRBWMQIHMINTKCEJ6IE2TNG7J2GL3W5WHBWSJ2JA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 23 0 S2ILORSTAXSFPET2ARE5UU345EG7FMTFAOXDHQKZPN6MQYEWSZ7A 0 249999999 5749999977 {[{cert 3NRLMV7E5NRR5FJ4NSFW4HCKQHEYO6UBJZ4RDBKNRMT3HZK6M3JQ 4BBME3TBESPNF4WZROAM3EBMMGSYLTOH7USWH2C37UEGNWHYFKXLLLMHOA 0 22 1022 [] [] 23 <nil> <nil> <nil> <nil> <nil> <nil> <nil> 0xc0002c0220 0 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay UJVFOKR67GO2INJZTORYUQ6UCVX4JRY7YE7SDKBYCE2ZZNQG26ZA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 22 1022 [] [] 23 <nil> 0xc0002c3f80 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230394 {test-fast-compactcert 0 0 0} { false} [] 0 24}
+ compactcert_test.go:89: Round 24, block {VHIGVJ4T5VMRHN754PVFJXWI4XMQEEUXT25Y4AOYIGQANVR7FA6Q T3LFHX7XW2NSWGTWSL45YGRSSU52WRU5GM2CZYDNAS4J6M5SDCZQ 4ENS6HLVX5WGE33OZAABVZZXEWTCNV4WKS7PFA6YCKGL4JLNV7YQ ICN7RP7DZRYOCDOJGVOT3TX6OPUH5SIOQGAOIJLW2C7E3QAUSTJ5IMNXLQ 24 0 6LIKNDUDBQNC7PAI5CLWA3V26PVIQ35SH5GS5IRET2DMF4FDJGSQ 0 249999999 5999999976 {[{pay JK7QOOI22BKJNVA6IFZEE4A7OWBG4CLXCWSKXSAWQAEXREM7ZICA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 23 1023 [] [] 24 <nil> 0xc0002cc080 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230399 {test-fast-compactcert 0 0 0} { false} [183 79 22 14 195 141 139 6 182 177 143 252 117 84 5 199 36 245 68 163 251 94 231 235 196 194 158 244 158 168 121 193] 9999999999979000 24}
+ compactcert_test.go:89: Round 25, block {KDLXFDZKRIGHF4QTCOJHY3UUXKRQK3MX3BHFAVWGPCRLU7FZNQXA VHIGVJ4T5VMRHN754PVFJXWI4XMQEEUXT25Y4AOYIGQANVR7FA6Q IF3ORFRHF2PBTZIGYVLZC24NUHTWB772ZIQKVSK6KVZKIBIETUDQ 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 25 0 CQYRR37NYTUHDYPEAOV3PCCLDLOQORQKLJB754Y2D4WSEWW4ZBBQ 0 249999999 6249999975 {[{pay WWKT3FG7OY5WT6ROWRETL254T2Z5NSGNGW7FABTA5JRSE4BT3BQQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 24 1024 [] [] 25 <nil> 0xc00021e680 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230404 {test-fast-compactcert 0 0 0} { false} [] 0 24}
+ compactcert_test.go:89: Round 26, block {64DGKJURDYAYHXLEX5MUJTKR7MKRI7RARQWWWDIIPHK6GBCGBJVA KDLXFDZKRIGHF4QTCOJHY3UUXKRQK3MX3BHFAVWGPCRLU7FZNQXA WG2GIJTOIH52L3XCBIBIHEBLEKYYUZZPERH72FPUZJIDTITY5IFQ XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 26 0 M4TTKGJKW6DNC6JE5ZWHBRLMCTQ5KLMC7YW2TKZNBMMA7YIYJXJA 0 249999999 6499999974 {[{pay XMAUZD3TIVKIFWIBR7KMAEQQYMOPFQFX3IKZZXE5N2AJGMCAQ2TQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 25 1025 [] [] 26 <nil> 0xc00021ec40 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230410 {test-fast-compactcert 0 0 0} { false} [] 0 24}
+ compactcert_test.go:89: Round 27, block {IL7KHIO7FTS4ZOVZ2M2WS6N3F2VP72W2CAFQJCHAT7OCKT2C4SSA 64DGKJURDYAYHXLEX5MUJTKR7MKRI7RARQWWWDIIPHK6GBCGBJVA WRQSWRX7DYZIIBJSOGSIPTLED46R3PJV2UHFXUPBNREXJ3AKZNKQ B4D4WAMUWDSKPLZT5M245BX3UGLOTMU5BPND2Q4FRXF4BHVPE4HHQPH6XA 27 0 JA7HOJX7IUJGOS5FGXMG6LWZP66N7AXOZ6VHGKWHZWYBT35UOMPQ 0 249999999 6749999973 {[{pay GZNFPJIP6J4XRZIEFWYUETGNZLRT3WEWBFTKDOMPSLBQSK357HYA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 25 1025 [] [] 27 <nil> 0xc0002c2940 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230417 {test-fast-compactcert 0 0 0} { false} [] 0 24}
+ compactcert_test.go:89: Round 28, block {R75G3ZJVBSJEMPIDZN4OHUKHO22PWMTEJVUJONZMEBYMTZLLYZQQ IL7KHIO7FTS4ZOVZ2M2WS6N3F2VP72W2CAFQJCHAT7OCKT2C4SSA GJ35VV4UVT4YYMOXLZ4YAYO2JXB445K2X3TO5YX3F5SJ4DDXPFIA VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 28 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 6999999972 {[]} 1617230422 {test-fast-compactcert 0 0 0} { false} [] 0 24}
+ compactcert_test.go:89: Round 29, block {ATPD63JSWSOVD7FZ54FFM6LH4ZDYSPFESSI47Q3WNB5Z3N6GERZA R75G3ZJVBSJEMPIDZN4OHUKHO22PWMTEJVUJONZMEBYMTZLLYZQQ U3PT63BMP23DGHMDGE6CN2NZENJK3YWKJMER43ZH4IXXPOYGX3LA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 29 0 KTXF52QZTF4NYE5PZPENWWRI76DLD6MLVOA4VFCEVK3JES66IIHQ 0 249999999 7249999971 {[{pay O2OJJCNT6TNUQI3OQ6WZGQDXRY5ENLXDQCRZQ6Q5KC3P62IYZ2PQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 27 1027 [] [] 29 <nil> 0xc00007dd80 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay FD2RZWZWHZEWMYWQYNL54NT572JLORRPQ2TR7QNUZZNPIT2QN3YQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 28 1028 [] [] 29 <nil> 0xc00007ddc0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay KQIPGV3ENMJNXUTBDK2QFTDCJ52PEE3ISBC2FMHZSVUSZ6GKWCNA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 28 1028 [] [] 29 <nil> 0xc00007de00 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230427 {test-fast-compactcert 0 0 0} { false} [] 0 24}
+ compactcert_test.go:89: Round 30, block {7U5H4FEBP4GMAKUEW2564G2LI23FH43UWVMTYJ6B75JTQGQ65WQA ATPD63JSWSOVD7FZ54FFM6LH4ZDYSPFESSI47Q3WNB5Z3N6GERZA ZCI2IFUVJBXMOUDAH2CEWRRIJCO5IOH5LWQUDQ64TPKFNBJBXS3A QE2T4IZYSGIR4O4W4NGACYFGZCODIPQFYIIK4YYFEI6C4ANM2CWPC4XL44 30 0 JM3TIGR5EROVCTQ2EPTRA5NROSMU35XWVHPIWSSCQJ75YXURSJXQ 0 249999999 7499999970 {[{cert GMLXKXZINOP7QELU6RH22HDCFOAD5UP4IOLQ4AI5COWVJRTQR3MQ 4BBME3TBESPNF4WZROAM3EBMMGSYLTOH7USWH2C37UEGNWHYFKXLLLMHOA 0 29 1029 [] [] 30 <nil> <nil> <nil> <nil> <nil> <nil> <nil> 0xc000596520 0 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230432 {test-fast-compactcert 0 0 0} { false} [] 0 32}
+ compactcert_test.go:89: Round 31, block {4HG2C4XXT6VBZ7DLDZ3KOMAMVMAP5QO52ELKGTCD2B7QPPKOJW5Q 7U5H4FEBP4GMAKUEW2564G2LI23FH43UWVMTYJ6B75JTQGQ65WQA WLPPPBRW7ULPCSIUQ6AXNZCHHZC7WNYMHIXV3NDSDL6HNQZD55WA VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 31 0 FRPB4ZO7BBWJVIYZEDLU3BIWTROSYK37RXGIEDYDNB446VI2SY5A 0 249999999 7749999969 {[{pay 6T7LX42PE65DY47HWSVRS57RCARJUCK2G67H7DCMILICROUWGS5Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 29 1029 [] [] 31 <nil> 0xc00021f7c0 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230438 {test-fast-compactcert 0 0 0} { false} [] 0 32}
+ compactcert_test.go:89: Round 32, block {63Y2URATG3XTICZJCT4MFXNJALV27YYMUAB35WISVEPWIQSB4Q3Q 4HG2C4XXT6VBZ7DLDZ3KOMAMVMAP5QO52ELKGTCD2B7QPPKOJW5Q 6KNQXYGN6VNDONCHH2F7DDNFPJEODUKKJE3UJA5C26S7UOH7H6JA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 32 0 PMYCX3IZFVE3F4NQDFPUB6N6RHOKD5DSFTL4P5FC3R6EMZKPPGEQ 0 249999999 7999999968 {[{pay 3ZMHN7MM3HYVTRCOSQIGWOLDDXLU7I5PL7OW74YSQUZCL2TEK6LA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 31 1031 [] [] 32 <nil> 0xc00059e640 <nil> <nil> <nil> <nil> <nil> <nil> 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230444 {test-fast-compactcert 0 0 0} { false} [233 145 169 123 87 83 112 145 172 208 106 68 213 247 28 237 130 73 209 98 28 184 77 97 145 124 130 151 111 242 51 135] 9999999999971000 32}
+algod(30833) : Exiting on terminated
+algod(30879) : Exiting on terminated
+algod(30867) : Exiting on terminated
+algod(30921) : Exiting on terminated
+algod(30941) : Exiting on terminated
+algod(30840) : Exiting on terminated
+algod(30856) : Exiting on terminated
+algod(30903) : Exiting on terminated
+algod(30913) : Exiting on terminated
+algod(30930) : Exiting on terminated
+algod(30849) : Exiting on terminated
+algod(30825) : Exiting on terminated
+--- PASS: TestCompactCerts (224.23s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/compactcert 224.312s
+=== RUN TestBasicMultisig
+=== PAUSE TestBasicMultisig
+=== RUN TestZeroThreshold
+=== PAUSE TestZeroThreshold
+=== RUN TestZeroSigners
+=== PAUSE TestZeroSigners
+=== RUN TestDuplicateKeys
+=== PAUSE TestDuplicateKeys
+=== CONT TestZeroSigners
+=== CONT TestDuplicateKeys
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(31530) : No REST API Token found. Generated token: f80b041bcfb19db234f704bf8d9c5fe1f57da15dcc119617ea726c0268eb59f4
+algod(31530) : No Admin REST API Token found. Generated token: 964a11e61aecea5ad297a94911fcc7070516ef558dd025681a18d86b71a4e155
+algod(31530) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Primary/node.log
+algod(31530) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31530) : Initializing the Algorand node...
+algod(31537) : No REST API Token found. Generated token: 0456667b9c472fa4c6b5a85c8829303d43b527ef39dbca4341df21dffc2c6c6d
+algod(31537) : No Admin REST API Token found. Generated token: f31b840d17d9815c8c4d0ad163ae5404448b14088266894dc0f597a671379a4e
+algod(31537) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Primary/node.log
+algod(31530) : Success!
+algod(31530) : ⇨ http server started on 127.0.0.1:8080
+algod(31530) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(31537) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31537) : Initializing the Algorand node...
+algod(31537) : Success!
+algod(31537) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40777. Press Ctrl-C to exit
+algod(31537) : ⇨ http server started on 127.0.0.1:40777
+algod(31543) : No REST API Token found. Generated token: bb56ba29b190894bbd93711dbc9bece0851ae6428e03c33f146c52de4bdea38d
+algod(31543) : No Admin REST API Token found. Generated token: 3c10300c731fb4295320d35aa5f8c46a08512f3efdf055e15c7f4a512f821f13
+algod(31543) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Node/node.log
+algod(31551) : No REST API Token found. Generated token: 6a64df15d7fca1498381e32068bbdb6cde110cbfdf94cadff8c44160409a9e9e
+algod(31551) : No Admin REST API Token found. Generated token: 92bfaff60d1072ba253089e0b9b66f93715e392010772121aaafc11d56025f21
+algod(31551) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Node/node.log
+algod(31543) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31543) : Initializing the Algorand node...
+algod(31543) : Success!
+algod(31543) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43247. Press Ctrl-C to exit
+algod(31543) : ⇨ http server started on 127.0.0.1:43247
+algod(31551) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31551) : Initializing the Algorand node...
+algod(31551) : Success!
+algod(31551) : ⇨ http server started on 127.0.0.1:41349
+algod(31551) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41349. Press Ctrl-C to exit
+algod(31530) : Exiting on terminated
+algod(31543) : Exiting on terminated
+--- PASS: TestZeroSigners (15.21s)
+=== CONT TestBasicMultisig
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(31636) : No REST API Token found. Generated token: cb0ce1e70821180e42593e6b7b81b8010cdc6aef341138de812c18e64ded1aea
+algod(31636) : No Admin REST API Token found. Generated token: 00cc450958ef513b1493cdfbedfebcaa08277a45e754dc32e445b2178d3efc66
+algod(31636) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Primary/node.log
+algod(31636) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31636) : Initializing the Algorand node...
+algod(31636) : Success!
+algod(31636) : ⇨ http server started on 127.0.0.1:8080
+algod(31636) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(31643) : No REST API Token found. Generated token: f0b78103e8424c75bcc0e0158e6a6708fdf0d8145b1356408471f1ec1b72c7cb
+algod(31643) : No Admin REST API Token found. Generated token: f2f6603e4969ffffd6875dd2f5097783ed67d33a11cdcdc194435f3b215323c9
+algod(31643) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Node/node.log
+algod(31643) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31643) : Initializing the Algorand node...
+algod(31643) : Success!
+algod(31643) : ⇨ http server started on 127.0.0.1:37557
+algod(31643) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37557. Press Ctrl-C to exit
+algod(31551) : Exiting on terminated
+algod(31537) : Exiting on terminated
+--- PASS: TestDuplicateKeys (31.65s)
+=== CONT TestZeroThreshold
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(31722) : No REST API Token found. Generated token: e7ffddccbe27858147d28d72035691139fa3fd690c9763ed55a96fe2ad5d05d9
+algod(31722) : No Admin REST API Token found. Generated token: d504a0276590b42d8e6f4a5730373fe7a5d7241695a4b475299cfee1e8beca94
+algod(31722) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Primary/node.log
+algod(31722) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31722) : Initializing the Algorand node...
+algod(31722) : Success!
+algod(31722) : ⇨ http server started on 127.0.0.1:41599
+algod(31722) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41599. Press Ctrl-C to exit
+algod(31729) : No REST API Token found. Generated token: 2605ba53edb3b8748b8dd23452fa63da60556264b865e03d802d372d09b44c60
+algod(31729) : No Admin REST API Token found. Generated token: 0f75ec66fd7d7025de193962c6f22219cb8c126961f026481424979e4fdbdcfc
+algod(31729) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Node/node.log
+algod(31729) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(31729) : Initializing the Algorand node...
+algod(31729) : Success!
+algod(31729) : ⇨ http server started on 127.0.0.1:36485
+algod(31729) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36485. Press Ctrl-C to exit
+algod(31722) : Exiting on terminated
+algod(31729) : Exiting on terminated
+--- PASS: TestZeroThreshold (14.61s)
+algod(31643) : Exiting on terminated
+algod(31636) : Exiting on terminated
+--- PASS: TestBasicMultisig (52.16s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/multisig 67.439s
+=== RUN TestParticipationKeyOnlyAccountParticipatesCorrectly
+=== PAUSE TestParticipationKeyOnlyAccountParticipatesCorrectly
+=== RUN TestNewAccountCanGoOnlineAndParticipate
+ onlineOfflineParticipation_test.go:105:
+--- SKIP: TestNewAccountCanGoOnlineAndParticipate (0.00s)
+=== RUN TestOnlineOfflineRewards
+=== PAUSE TestOnlineOfflineRewards
+=== RUN TestPartkeyOnlyRewards
+ participationRewards_test.go:137:
+--- SKIP: TestPartkeyOnlyRewards (0.00s)
+=== RUN TestRewardUnitThreshold
+=== PAUSE TestRewardUnitThreshold
+=== RUN TestRewardRateRecalculation
+=== PAUSE TestRewardRateRecalculation
+=== CONT TestParticipationKeyOnlyAccountParticipatesCorrectly
+=== CONT TestRewardUnitThreshold
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Online.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Offline.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Online.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Partkey.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Rich.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Partkey.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/SmallWallet.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Offline.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Partkey.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Online.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Rich.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Partkey.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Online.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/SmallWallet.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(32118) : No REST API Token found. Generated token: b2941383a9de10a3657bccdff84a90f8cf5adac07b5dbf8ee5e741ce2b70ba5f
+algod(32118) : No Admin REST API Token found. Generated token: f490b0d3ec37b1e31ba0fb5fe923202d4bfe0b6326f7b85fb07fbaa9643c0d90
+algod(32118) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Primary/node.log
+algod(32118) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32118) : Initializing the Algorand node...
+algod(32124) : No REST API Token found. Generated token: 501c2596368437b03b1c0bde5abd3b3c0d2850e16b4c7bace8a75cfb47edd81f
+algod(32124) : No Admin REST API Token found. Generated token: b84d3268f62fb30ef1e88ebe444b028c1c79c11b52f3ee81809eee3578137eed
+algod(32124) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Online/node.log
+algod(32118) : Success!
+algod(32118) : ⇨ http server started on 127.0.0.1:8080
+algod(32118) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(32124) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32124) : Initializing the Algorand node...
+algod(32124) : Success!
+algod(32124) : ⇨ http server started on 127.0.0.1:45635
+algod(32124) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45635. Press Ctrl-C to exit
+algod(32131) : No REST API Token found. Generated token: d1e45dd1b1bc6a3980083ed65353a9e8c0fb8dd201a0d800289a626a34ad2e5a
+algod(32131) : No Admin REST API Token found. Generated token: b524097f7267e2b00d1ea2913fecd4b879e585d39f3890917e51869095371501
+algod(32131) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Node/node.log
+algod(32131) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32131) : Initializing the Algorand node...
+algod(32145) : No REST API Token found. Generated token: 2af7fcf94aaec0bb7e762d7591902a5ed5aabb29aadfcaee4b3139e4b987cbeb
+algod(32145) : No Admin REST API Token found. Generated token: b793bed62f695ab0790c3edf501d3007f9789d83edae62987a3560cc298c2a0f
+algod(32145) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Offline/node.log
+algod(32131) : Success!
+algod(32131) : ⇨ http server started on 127.0.0.1:41251
+algod(32131) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41251. Press Ctrl-C to exit
+algod(32145) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32145) : Initializing the Algorand node...
+algod(32145) : Success!
+algod(32145) : ⇨ http server started on 127.0.0.1:40343
+algod(32145) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40343. Press Ctrl-C to exit
+algod(32169) : No REST API Token found. Generated token: 76ddef86fb25c12f2674036230fb16a014d21b51f0f88a598a76cbb1e3560349
+algod(32169) : No Admin REST API Token found. Generated token: db3d468ddc8df18d04b273488839d65a5ef2352ad0146c2250e51fb274e1bf7e
+algod(32169) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Partkey/node.log
+algod(32169) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32169) : Initializing the Algorand node...
+algod(32169) : Success!
+algod(32169) : ⇨ http server started on 127.0.0.1:39177
+algod(32169) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39177. Press Ctrl-C to exit
+algod(32189) : No REST API Token found. Generated token: a1eb278a09fd5575eb1e4f2de669e5f5d92d0eed41af13aa530c5b168142574c
+algod(32189) : No Admin REST API Token found. Generated token: 4e69519005b624d18a5dc40b2bf92fbf21bdcdeea8a3571ede6a27f39fd9661e
+algod(32189) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/SmallNode/node.log
+algod(32189) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32189) : Initializing the Algorand node...
+algod(32189) : Success!
+algod(32189) : ⇨ http server started on 127.0.0.1:44589
+algod(32189) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44589. Press Ctrl-C to exit
+algod(32131) : Exiting on terminated
+algod(32118) : Exiting on terminated
+--- PASS: TestParticipationKeyOnlyAccountParticipatesCorrectly (32.97s)
+=== CONT TestOnlineOfflineRewards
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Offline.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Online.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/SmallWallet.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Partkey.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Online.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/SmallWallet.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Partkey.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(32347) : No REST API Token found. Generated token: 0fbfe2b13ae71d16dac85ad687a11c23f6d7f0d0ac4389bb19f7da5d4e4bed8c
+algod(32347) : No Admin REST API Token found. Generated token: 1173696532ee5a8eb784c45c9a4401efd2a85f01e2d49a1efdf260ff8010bf98
+algod(32347) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Online/node.log
+algod(32347) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32347) : Initializing the Algorand node...
+algod(32347) : Success!
+algod(32347) : ⇨ http server started on 127.0.0.1:8080
+algod(32347) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(32354) : No REST API Token found. Generated token: bd2d0f855c93caba74886e5e88b35e4353a66d16eca8f3c263c4f04a4e4a0978
+algod(32354) : No Admin REST API Token found. Generated token: 35f20a7d2d9164ae1b9f76888c6ea8500863800bf0ff3273e8039de04a4258d4
+algod(32354) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/SmallNode/node.log
+algod(32354) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32354) : Initializing the Algorand node...
+algod(32354) : Success!
+algod(32354) : ⇨ http server started on 127.0.0.1:36723
+algod(32354) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36723. Press Ctrl-C to exit
+algod(32361) : No REST API Token found. Generated token: 5e7cad1a41f02a0ea22861bf5cb6fcbe89bb6c1766551563679a3be14de9a17a
+algod(32361) : No Admin REST API Token found. Generated token: 0e420dafaddb9ea7334fa809b3ae1d22d0b6598b3e2f7f6ea547271570b8cd43
+algod(32361) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Offline/node.log
+algod(32361) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32361) : Initializing the Algorand node...
+algod(32361) : Success!
+algod(32361) : ⇨ http server started on 127.0.0.1:40721
+algod(32361) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40721. Press Ctrl-C to exit
+algod(32369) : No REST API Token found. Generated token: ebea49b10fc3c244dec7fca982469554dfd2a8684b963134d205cbf80dd14b0d
+algod(32369) : No Admin REST API Token found. Generated token: 99503eb4275df182b1a94445e8c1211f7e1d734c4d757bc692e6282a19a91a39
+algod(32369) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Partkey/node.log
+algod(32369) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(32369) : Initializing the Algorand node...
+algod(32369) : Success!
+algod(32369) : ⇨ http server started on 127.0.0.1:32959
+algod(32369) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:32959. Press Ctrl-C to exit
+algod(32189) : Exiting on terminated
+algod(32169) : Exiting on terminated
+algod(32145) : Exiting on terminated
+algod(32124) : Exiting on terminated
+=== CONT TestRewardRateRecalculation
+--- PASS: TestRewardUnitThreshold (79.11s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet2.0.3000000.partkey
+test-fast-reward-recalculation 100000
+algod(307) : No REST API Token found. Generated token: 2902709fd1b51789752d7b399a5a405aefb0971d06fe27559b124e946fe9c321
+algod(307) : No Admin REST API Token found. Generated token: 95d9b364f4c826c36ef4723893533ea4668db2ce10b15b4175eb4635484d8e2a
+algod(307) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Primary/node.log
+algod(307) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(307) : Initializing the Algorand node...
+algod(307) : Success!
+algod(307) : ⇨ http server started on 127.0.0.1:39351
+algod(307) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39351. Press Ctrl-C to exit
+algod(314) : No REST API Token found. Generated token: f7c0d43b1905580c5657c197e1276ca9382e2fe2854a526d35bbeb728d2e7d2d
+algod(314) : No Admin REST API Token found. Generated token: 82a13bab7b191392149b7bcdead54616b2678c9173951b70409e814801ae67d6
+algod(314) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Node/node.log
+algod(314) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(314) : Initializing the Algorand node...
+algod(314) : Success!
+algod(314) : ⇨ http server started on 127.0.0.1:44593
+algod(314) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44593. Press Ctrl-C to exit
+algod(32354) : Exiting on terminated
+algod(32369) : Exiting on terminated
+algod(32361) : Exiting on terminated
+algod(32347) : Exiting on terminated
+--- PASS: TestOnlineOfflineRewards (97.68s)
+algod(307) : Exiting on terminated
+algod(314) : Exiting on terminated
+--- PASS: TestRewardRateRecalculation (105.57s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/participation 184.782s
+=== RUN TestBasicPartitionRecovery
+ partitionRecovery_test.go:34:
+--- SKIP: TestBasicPartitionRecovery (0.00s)
+=== RUN TestPartitionRecoverySwapStartup
+ partitionRecovery_test.go:78:
+--- SKIP: TestPartitionRecoverySwapStartup (0.00s)
+=== RUN TestPartitionRecoveryStaggerRestart
+ partitionRecovery_test.go:98:
+--- SKIP: TestPartitionRecoveryStaggerRestart (0.00s)
+=== RUN TestBasicPartitionRecoveryPartOffline
+ partitionRecovery_test.go:159:
+--- SKIP: TestBasicPartitionRecoveryPartOffline (0.00s)
+=== RUN TestPartitionHalfOffline
+ partitionRecovery_test.go:210:
+--- SKIP: TestPartitionHalfOffline (0.00s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/partitionRecovery 0.107s
+=== RUN TestTealCompile
+ compile_test.go:32:
+--- SKIP: TestTealCompile (0.00s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/teal 0.084s
+=== RUN TestAccountInformationV2
+=== PAUSE TestAccountInformationV2
+=== RUN TestAssetValidRounds
+=== PAUSE TestAssetValidRounds
+=== RUN TestAssetConfig
+ asset_test.go:188:
+--- SKIP: TestAssetConfig (0.00s)
+=== RUN TestAssetInformation
+=== PAUSE TestAssetInformation
+=== RUN TestAssetGroupCreateSendDestroy
+=== PAUSE TestAssetGroupCreateSendDestroy
+=== RUN TestAssetSend
+=== PAUSE TestAssetSend
+=== RUN TestAssetCreateWaitRestartDelete
+=== PAUSE TestAssetCreateWaitRestartDelete
+=== RUN TestAssetCreateWaitBalLookbackDelete
+ asset_test.go:964:
+--- SKIP: TestAssetCreateWaitBalLookbackDelete (0.00s)
+=== RUN TestAccountsCanClose
+=== PAUSE TestAccountsCanClose
+=== RUN TestGroupTransactions
+=== PAUSE TestGroupTransactions
+=== RUN TestGroupTransactionsDifferentSizes
+=== PAUSE TestGroupTransactionsDifferentSizes
+=== RUN TestGroupTransactionsSubmission
+=== PAUSE TestGroupTransactionsSubmission
+=== RUN TestLeaseTransactionsSameSender
+=== PAUSE TestLeaseTransactionsSameSender
+=== RUN TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7
+=== PAUSE TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7
+=== RUN TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7
+=== PAUSE TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7
+=== RUN TestLeaseTransactionsSameSenderDifferentLease
+=== PAUSE TestLeaseTransactionsSameSenderDifferentLease
+=== RUN TestLeaseTransactionsDifferentSender
+=== PAUSE TestLeaseTransactionsDifferentSender
+=== RUN TestOverlappingLeases
+=== PAUSE TestOverlappingLeases
+=== RUN TestAccountsCanChangeOnlineState
+=== PAUSE TestAccountsCanChangeOnlineState
+=== RUN TestAccountsCanChangeOnlineStateInTheFuture
+=== PAUSE TestAccountsCanChangeOnlineStateInTheFuture
+=== RUN TestTxnMerkleProof
+=== PAUSE TestTxnMerkleProof
+=== RUN TestAccountsCanSendMoney
+=== PAUSE TestAccountsCanSendMoney
+=== RUN TestTransactionPoolOrderingAndClearing
+ transactionPool_test.go:30: test is flaky as of 2019-06-18
+--- SKIP: TestTransactionPoolOrderingAndClearing (0.00s)
+=== RUN TestTransactionPoolExponentialFees
+ transactionPool_test.go:115: new FIFO pool does not have exponential fee txn replacement
+--- SKIP: TestTransactionPoolExponentialFees (0.00s)
+=== CONT TestAssetInformation
+=== CONT TestLeaseTransactionsSameSender
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet2.0.3000000.partkey
+future 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet1.0.3000000.partkey
+algod(1888) : No REST API Token found. Generated token: a9bccfd5e553a163d804322c73067f2323648df7d0e17490c19b27bb4b75e70a
+algod(1888) : No Admin REST API Token found. Generated token: 18f3104cbd3a84072b7eb430ed92a75ecb289484d9bf3ec30d47be720890e12c
+algod(1888) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Primary/node.log
+algod(1893) : No REST API Token found. Generated token: c2fb5dd0976d4ebdf869d3ac535646f78e7c19b70db7f2585acaafe7dfc290b8
+algod(1893) : No Admin REST API Token found. Generated token: a39db8eb80a56f025be2cb4fbc4fd5b3622135a54f7df645dff310ebfbf1309f
+algod(1893) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Primary/node.log
+algod(1888) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(1888) : Initializing the Algorand node...
+algod(1888) : Success!
+algod(1888) : ⇨ http server started on 127.0.0.1:8080
+algod(1888) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(1893) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(1893) : Initializing the Algorand node...
+algod(1893) : Success!
+algod(1893) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43599. Press Ctrl-C to exit
+algod(1893) : ⇨ http server started on 127.0.0.1:43599
+algod(1901) : No REST API Token found. Generated token: 1d72e0228cf45054ecc40c9675b5a96f6432386bb5cf6ac54179c8add84373b8
+algod(1901) : No Admin REST API Token found. Generated token: d13d2737c812745791ef45ad9a141eddb99f1a5c24b89f07bbcd413d17b4b6be
+algod(1901) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Node/node.log
+algod(1901) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(1901) : Initializing the Algorand node...
+algod(1911) : No REST API Token found. Generated token: 21101d739e3564521574add43044801616cd2478d80b42f3fac89e4f39b64524
+algod(1911) : No Admin REST API Token found. Generated token: 4ef81653435d972cbb472572e380214eb6987ad23198abed43ca846594ec1939
+algod(1911) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Node/node.log
+algod(1901) : Success!
+algod(1901) : ⇨ http server started on 127.0.0.1:35003
+algod(1901) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35003. Press Ctrl-C to exit
+algod(1911) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(1911) : Initializing the Algorand node...
+algod(1911) : Success!
+algod(1911) : ⇨ http server started on 127.0.0.1:40385
+algod(1911) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40385. Press Ctrl-C to exit
+algod(1893) : Exiting on terminated
+algod(1911) : Exiting on terminated
+--- PASS: TestLeaseTransactionsSameSender (21.84s)
+=== CONT TestTxnMerkleProof
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTxnMerkleProof/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTxnMerkleProof/Wallet1.0.3000000.partkey
+future 100000
+algod(2137) : No REST API Token found. Generated token: ed17eff8b94d0feec94e0464f02c99ed73ff6dc4382b0c78e9f0c6e2fbc899fe
+algod(2137) : No Admin REST API Token found. Generated token: 61da9362b421dd18418da4841e69845f3602af166d0acf88e15cc25b8e2276ea
+algod(2137) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTxnMerkleProof/Primary/node.log
+algod(2137) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2137) : Initializing the Algorand node...
+algod(2137) : Success!
+algod(2137) : ⇨ http server started on 127.0.0.1:41093
+algod(2137) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41093. Press Ctrl-C to exit
+algod(1901) : Exiting on terminated
+algod(1888) : Exiting on terminated
+=== CONT TestAccountsCanSendMoney
+--- PASS: TestAssetInformation (29.59s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(2326) : No REST API Token found. Generated token: a445763d6d79f7dca5a3933cb10d0c39dbcaa25daa4613f7848935ebcb8ba7c4
+algod(2326) : No Admin REST API Token found. Generated token: 66e3b53755b799d221f9a0b75a980a1f9700ee827bb9dca4dc1b08b992ac0b56
+algod(2326) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Primary/node.log
+algod(2326) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2326) : Initializing the Algorand node...
+algod(2326) : Success!
+algod(2326) : ⇨ http server started on 127.0.0.1:8080
+algod(2326) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(2333) : No REST API Token found. Generated token: ad700aa33104b665090e0fa77c228cf72e9f4043b7a6c4183edf2a810be9bdca
+algod(2333) : No Admin REST API Token found. Generated token: 3f27a303940f8667501466be62c7454957707cc09b3700147f6fac86b233b657
+algod(2333) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Node/node.log
+algod(2333) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2333) : Initializing the Algorand node...
+algod(2333) : Success!
+algod(2333) : ⇨ http server started on 127.0.0.1:42243
+algod(2333) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42243. Press Ctrl-C to exit
+algod(2137) : Exiting on terminated
+=== CONT TestAccountsCanChangeOnlineStateInTheFuture
+--- PASS: TestTxnMerkleProof (9.01s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Offline2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Offline1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online2.0.3000000.partkey
+future 100000
+algod(2355) : No REST API Token found. Generated token: cc465040843d821ac35b8476ff03c4e5c69be1d901dd91e20049b0d82b9dd0e6
+algod(2355) : No Admin REST API Token found. Generated token: f8b203d41be7500857937ce8c09b67bd4dff4eb58bdfcfd7249cbeaae0b40b32
+algod(2355) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Primary/node.log
+algod(2355) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2355) : Initializing the Algorand node...
+algod(2355) : Success!
+algod(2355) : ⇨ http server started on 127.0.0.1:45241
+algod(2355) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45241. Press Ctrl-C to exit
+algod(2362) : No REST API Token found. Generated token: 4a64c0065fd49e91a64b0885127a97d6c99209236ec515bd4fdce74cdd2c8a1b
+algod(2362) : No Admin REST API Token found. Generated token: a5addca1ab95ca5d5df86db7ec8028253454890459032ac7b62c384b991b2e07
+algod(2362) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Node/node.log
+algod(2362) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2362) : Initializing the Algorand node...
+algod(2362) : Success!
+algod(2362) : ⇨ http server started on 127.0.0.1:35755
+algod(2362) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35755. Press Ctrl-C to exit
+algod(2355) : Exiting on terminated
+algod(2362) : Exiting on terminated
+=== CONT TestAccountsCanChangeOnlineState
+--- PASS: TestAccountsCanChangeOnlineStateInTheFuture (23.47s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Offline1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Offline2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(2596) : No REST API Token found. Generated token: 208ce4ef05348f54e54ca06a7eecf6de3712c060063b47f50b01c3bc7a96e7cc
+algod(2596) : No Admin REST API Token found. Generated token: e78f853ccba6d88093b53cb94df633f2d576054d792b640df74a76f9ad674ae8
+algod(2596) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Primary/node.log
+algod(2596) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2596) : Initializing the Algorand node...
+algod(2596) : Success!
+algod(2596) : ⇨ http server started on 127.0.0.1:35941
+algod(2596) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35941. Press Ctrl-C to exit
+algod(2613) : No REST API Token found. Generated token: 92fc6265977b9eefeeeb338f84d4181b04d1d84bfe8e5631d65dc32e58d821a5
+algod(2613) : No Admin REST API Token found. Generated token: c08f7c987ea66597c312a210dde8ec474e6256ce150247db466da5f742652a30
+algod(2613) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Node/node.log
+algod(2613) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2613) : Initializing the Algorand node...
+algod(2613) : Success!
+algod(2613) : ⇨ http server started on 127.0.0.1:40417
+algod(2613) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40417. Press Ctrl-C to exit
+algod(2333) : Exiting on terminated
+algod(2326) : Exiting on terminated
+=== CONT TestOverlappingLeases
+--- PASS: TestAccountsCanSendMoney (31.42s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(2651) : No REST API Token found. Generated token: 5044e47d7d24130f465cfede46eb29baa6865c3d364fcad0bf18615d69a41bc0
+algod(2651) : No Admin REST API Token found. Generated token: a334f4d7bfda83c87be07b247e1165a6d01f5b33ff5ce0f5bcb5d3821e769ed2
+algod(2651) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Primary/node.log
+algod(2651) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2651) : Initializing the Algorand node...
+algod(2651) : Success!
+algod(2651) : ⇨ http server started on 127.0.0.1:8080
+algod(2651) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(2659) : No REST API Token found. Generated token: 49a41d209110733ad464d423d6d6a39a5e9dd7ab7f471a6db898b64f7fba4b45
+algod(2659) : No Admin REST API Token found. Generated token: 1f510a91143ca0f51b37266bd7510faf3bdd2e09331687023f122a199d94008f
+algod(2659) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Node/node.log
+algod(2659) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2659) : Initializing the Algorand node...
+algod(2659) : Success!
+algod(2659) : ⇨ http server started on 127.0.0.1:38481
+algod(2659) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38481. Press Ctrl-C to exit
+algod(2596) : Exiting on terminated
+algod(2613) : Exiting on terminated
+--- PASS: TestAccountsCanChangeOnlineState (13.95s)
+=== CONT TestLeaseTransactionsDifferentSender
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(2747) : No REST API Token found. Generated token: d05dc47dfdcd5867b668bdbbd3329e6d9e8d353749026f2bd7d3b4f97a995577
+algod(2747) : No Admin REST API Token found. Generated token: 51671fafe290424540790330d3bcc7715ba2242eaeb77619a3f0defefe5f521c
+algod(2747) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Primary/node.log
+algod(2747) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2747) : Initializing the Algorand node...
+algod(2747) : Success!
+algod(2747) : ⇨ http server started on 127.0.0.1:38775
+algod(2747) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38775. Press Ctrl-C to exit
+algod(2768) : No REST API Token found. Generated token: b578cf7ea7bc909dd68652b4269d96b351acc265a66b613ffd977f2d0d22e35a
+algod(2768) : No Admin REST API Token found. Generated token: b56ef1e06674d4135077259ce907c29792a0f40281935310ce06f77fc3bc6609
+algod(2768) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Node/node.log
+algod(2768) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2768) : Initializing the Algorand node...
+algod(2768) : Success!
+algod(2768) : ⇨ http server started on 127.0.0.1:36057
+algod(2768) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36057. Press Ctrl-C to exit
+algod(2747) : Exiting on terminated
+algod(2768) : Exiting on terminated
+--- PASS: TestLeaseTransactionsDifferentSender (18.40s)
+=== CONT TestLeaseTransactionsSameSenderDifferentLease
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(2923) : No REST API Token found. Generated token: 3256a9cbe2fff4efa5a9ad018058d232ffd49bf838157c4b6318801c4fcb86bc
+algod(2923) : No Admin REST API Token found. Generated token: 8a81d6ad362ba976efbe15056f3f6753a0723e0110b9527fd7426f9f47360185
+algod(2923) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Primary/node.log
+algod(2923) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2923) : Initializing the Algorand node...
+algod(2923) : Success!
+algod(2923) : ⇨ http server started on 127.0.0.1:46719
+algod(2923) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46719. Press Ctrl-C to exit
+algod(2931) : No REST API Token found. Generated token: a315d15fc5cd13d6bd86ac40c56f7090e7260bbb3dc466cba8919fec9edddaee
+algod(2931) : No Admin REST API Token found. Generated token: b2662b8003c1daf70a4a677762bfa062b1b1c9d86d5f28b71c6552f2b6be5d26
+algod(2931) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Node/node.log
+algod(2931) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2931) : Initializing the Algorand node...
+algod(2931) : Success!
+algod(2931) : ⇨ http server started on 127.0.0.1:37117
+algod(2931) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37117. Press Ctrl-C to exit
+algod(2923) : Exiting on terminated
+algod(2931) : Exiting on terminated
+=== CONT TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7
+--- PASS: TestLeaseTransactionsSameSenderDifferentLease (9.77s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(2972) : No REST API Token found. Generated token: e575c48e7b32a005d6a43ba2940cbb3afb8747b2450baf25dbae1386d5c867ec
+algod(2972) : No Admin REST API Token found. Generated token: a3c31fe893487d421cc3dec4ab6d97955491e674c4727a92a3c25f58b455982e
+algod(2972) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Primary/node.log
+algod(2972) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2972) : Initializing the Algorand node...
+algod(2972) : Success!
+algod(2972) : ⇨ http server started on 127.0.0.1:35385
+algod(2972) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35385. Press Ctrl-C to exit
+algod(2979) : No REST API Token found. Generated token: d1479d652dba2d951aa16dba8c439f68210c4e469dfca2e68f957dfa0de63f65
+algod(2979) : No Admin REST API Token found. Generated token: 08445f9a889abf331066bd9806d105f0c5fce41c27892ec2c8e5fcfc9b3eef59
+algod(2979) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Node/node.log
+algod(2979) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(2979) : Initializing the Algorand node...
+algod(2979) : Success!
+algod(2979) : ⇨ http server started on 127.0.0.1:39773
+algod(2979) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39773. Press Ctrl-C to exit
+algod(2979) : Exiting on terminated
+algod(2972) : Exiting on terminated
+=== CONT TestAssetCreateWaitRestartDelete
+--- PASS: TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 (14.01s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(3031) : No REST API Token found. Generated token: ec3d3e2bf69cbee0507d52dce6f66721d4ef85b164d5b4738ff3f881b1b8ceb8
+algod(3031) : No Admin REST API Token found. Generated token: d7041f7d88c318fba21fcdc2e5e1039c1cfaf872976502003ded1985d0ba8252
+algod(3031) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Primary/node.log
+algod(3031) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3031) : Initializing the Algorand node...
+algod(3031) : Success!
+algod(3031) : ⇨ http server started on 127.0.0.1:34485
+algod(3031) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34485. Press Ctrl-C to exit
+algod(3038) : No REST API Token found. Generated token: f1035e193ec2e2d9cc9e1460861010fd7479bb8238565d72d75aa13527df0025
+algod(3038) : No Admin REST API Token found. Generated token: 8f348a2fc82ba703ea32485fa6074b6beae3bf72c87d2fc862d2ef04b50f039f
+algod(3038) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Node/node.log
+algod(3038) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3038) : Initializing the Algorand node...
+algod(3038) : Success!
+algod(3038) : ⇨ http server started on 127.0.0.1:35537
+algod(3038) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35537. Press Ctrl-C to exit
+algod(3038) : Exiting on terminated
+algod(3031) : Exiting on terminated
+algod(3119) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Primary/node.log
+algod(3119) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3119) : Initializing the Algorand node...
+algod(3119) : Success!
+algod(3119) : ⇨ http server started on 127.0.0.1:44433
+algod(3119) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44433. Press Ctrl-C to exit
+algod(3135) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Node/node.log
+algod(3135) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3135) : Initializing the Algorand node...
+algod(3135) : Success!
+algod(3135) : ⇨ http server started on 127.0.0.1:41977
+algod(3135) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41977. Press Ctrl-C to exit
+algod(3119) : Exiting on terminated
+algod(3135) : Exiting on terminated
+--- PASS: TestAssetCreateWaitRestartDelete (35.88s)
+=== CONT TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/57016b942f6d97e6d4c0688b373bb0a2fc85a1a2 100000
+algod(3344) : No REST API Token found. Generated token: 2cacb019662ea692fc499f1c97d97edd64d24a1e5a5ce6481d1ed33967c5a3bc
+algod(3344) : No Admin REST API Token found. Generated token: cfffb61c27a394a01065d8d9475bf0e514e5ba479a7c0c0a30eb25300fdac652
+algod(3344) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Primary/node.log
+algod(3344) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3344) : Initializing the Algorand node...
+algod(3344) : Success!
+algod(3344) : ⇨ http server started on 127.0.0.1:46793
+algod(3344) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46793. Press Ctrl-C to exit
+algod(3352) : No REST API Token found. Generated token: 44f375208159d29ff0a55edff5ccdbbddafd703aae93798ccc305f78994232e4
+algod(3352) : No Admin REST API Token found. Generated token: 3830c76d22d506edc39fc82aa57144e61609594d501f98d5e65e1b4b2b81389b
+algod(3352) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Node/node.log
+algod(3352) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3352) : Initializing the Algorand node...
+algod(3352) : Success!
+algod(3352) : ⇨ http server started on 127.0.0.1:46863
+algod(3352) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46863. Press Ctrl-C to exit
+algod(2659) : Exiting on terminated
+algod(2651) : Exiting on terminated
+=== CONT TestGroupTransactionsSubmission
+--- PASS: TestOverlappingLeases (92.91s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(3387) : No REST API Token found. Generated token: 705cc1c02ec3439405f458d850aeff8905f493cf165f3fb6bc7d987edec8b44e
+algod(3387) : No Admin REST API Token found. Generated token: 2268a1cc7f749ecec27494a0891a9a04ab11b9682544d89f451468834203a0dd
+algod(3387) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Primary/node.log
+algod(3387) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3387) : Initializing the Algorand node...
+algod(3387) : Success!
+algod(3387) : ⇨ http server started on 127.0.0.1:8080
+algod(3387) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(3394) : No REST API Token found. Generated token: 7549713d9c9cb41f7208451e94b461de7f4b921adb2c5ddaa6ab9b9de4f857cd
+algod(3394) : No Admin REST API Token found. Generated token: cb1dd6acc299f981f1f62e8be47c8406efa7ef93a5ddc04c81bedc0778d0bc5b
+algod(3394) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Node/node.log
+algod(3394) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3394) : Initializing the Algorand node...
+algod(3394) : Success!
+algod(3394) : ⇨ http server started on 127.0.0.1:37777
+algod(3394) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37777. Press Ctrl-C to exit
+algod(3394) : Exiting on terminated
+algod(3387) : Exiting on terminated
+--- PASS: TestGroupTransactionsSubmission (7.79s)
+=== CONT TestGroupTransactionsDifferentSizes
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet1.0.3000000.partkey
+future 100000
+algod(3431) : No REST API Token found. Generated token: 51ed572ae8e715a9fd93e4396c77bac9ff68ac070f999ae9e143e6fc631f2fd1
+algod(3431) : No Admin REST API Token found. Generated token: 90a79eac61e3f4fa1286fbaea51b364b6bacb63a5d1eaf08855f324eff7de67b
+algod(3431) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Primary/node.log
+algod(3431) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3431) : Initializing the Algorand node...
+algod(3431) : Success!
+algod(3431) : ⇨ http server started on 127.0.0.1:8080
+algod(3431) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(3438) : No REST API Token found. Generated token: 0e48cda927a45a6f928fd54a9bd1ca8ffa3eff504f32f389445d3577b175a480
+algod(3438) : No Admin REST API Token found. Generated token: a0583194e478b515638491d0c3b973c817237e8a90ea8f028270680c43146610
+algod(3438) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Node/node.log
+algod(3438) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3438) : Initializing the Algorand node...
+algod(3438) : Success!
+algod(3438) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46151. Press Ctrl-C to exit
+algod(3438) : ⇨ http server started on 127.0.0.1:46151
+algod(3352) : Exiting on terminated
+algod(3344) : Exiting on terminated
+--- PASS: TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 (17.76s)
+=== CONT TestAccountsCanClose
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet1.0.3000000.partkey
+https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622 100000
+algod(3462) : No REST API Token found. Generated token: bed0457f34050eee13746a4d1c6ac74b9b71c5179e4fb0529d7adee760ae882a
+algod(3462) : No Admin REST API Token found. Generated token: 4ac2e2c9c0f69bd6ca3867bfaf2eed6e6fd09b014ab53253c16d6612b75d228a
+algod(3462) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Primary/node.log
+algod(3462) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3462) : Initializing the Algorand node...
+algod(3462) : Success!
+algod(3462) : ⇨ http server started on 127.0.0.1:35331
+algod(3462) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35331. Press Ctrl-C to exit
+algod(3469) : No REST API Token found. Generated token: 82975d866965c6e2df5c26b97b379f81f7aa83330a4be4c6dc283979aa506ec6
+algod(3469) : No Admin REST API Token found. Generated token: 84d4ba6080b608544939119a7ca223b44fa7e5124ff05d002aca68cd7857473f
+algod(3469) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Node/node.log
+algod(3469) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3469) : Initializing the Algorand node...
+algod(3469) : Success!
+algod(3469) : ⇨ http server started on 127.0.0.1:33011
+algod(3469) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33011. Press Ctrl-C to exit
+algod(3431) : Exiting on terminated
+algod(3438) : Exiting on terminated
+--- PASS: TestGroupTransactionsDifferentSizes (34.62s)
+=== CONT TestAssetSend
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet1.0.3000000.partkey
+future 100000
+algod(3568) : No REST API Token found. Generated token: 55158e9028c78273ec427ffd00d2eb41006c8589068b30239afce8281d2b653a
+algod(3568) : No Admin REST API Token found. Generated token: e0a7fe5e318af5f257cdff7cfc62b2b2e8903e0f352fdc42f73938cb2295df99
+algod(3568) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Primary/node.log
+algod(3568) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3568) : Initializing the Algorand node...
+algod(3568) : Success!
+algod(3568) : ⇨ http server started on 127.0.0.1:8080
+algod(3568) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(3575) : No REST API Token found. Generated token: a18a0a80459d66929f3b0d5e9099d77042e20541336125eff33dbbabfe3e5afd
+algod(3575) : No Admin REST API Token found. Generated token: 4e1b7c43e673ba832167611db8cb1095eff35e9c60362e2529a745424ee08786
+algod(3575) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Node/node.log
+algod(3575) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3575) : Initializing the Algorand node...
+algod(3575) : Success!
+algod(3575) : ⇨ http server started on 127.0.0.1:39309
+algod(3575) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39309. Press Ctrl-C to exit
+algod(3462) : Exiting on terminated
+algod(3469) : Exiting on terminated
+=== CONT TestGroupTransactions
+--- PASS: TestAccountsCanClose (34.81s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet2.0.3000000.partkey
+future 100000
+algod(3599) : No REST API Token found. Generated token: b7ce42035e67094b77e66ba60fef420e7a65b09b357f16cfb413db066d59f675
+algod(3599) : No Admin REST API Token found. Generated token: 319bc581de0b5c951338d18d0386cf66b443cb0c23b8d454cd6ba3c0c90f9b67
+algod(3599) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Primary/node.log
+algod(3599) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3599) : Initializing the Algorand node...
+algod(3599) : Success!
+algod(3599) : ⇨ http server started on 127.0.0.1:36173
+algod(3599) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36173. Press Ctrl-C to exit
+algod(3607) : No REST API Token found. Generated token: b9180f9fcaba5b8511b8404c4b56d17319047f5c028a8b7c52fd96e5ab3917b2
+algod(3607) : No Admin REST API Token found. Generated token: 2219dee18ae2f15f83d68236cf8c1f5be711558dd2871036ff846d299ee00071
+algod(3607) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Node/node.log
+algod(3607) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3607) : Initializing the Algorand node...
+algod(3607) : Success!
+algod(3607) : ⇨ http server started on 127.0.0.1:41333
+algod(3607) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41333. Press Ctrl-C to exit
+algod(3599) : Exiting on terminated
+algod(3607) : Exiting on terminated
+=== CONT TestAssetGroupCreateSendDestroy
+--- PASS: TestGroupTransactions (30.62s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(3694) : No REST API Token found. Generated token: 4e8d9a408c8586b9707cac412189a912e52e0767d2a4de37a9d0721473e39bd5
+algod(3694) : No Admin REST API Token found. Generated token: 972bbd1343961f18ccf3c43e81479387537c9cd647d28f990a85657f8543e01c
+algod(3694) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Primary/node.log
+algod(3694) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3694) : Initializing the Algorand node...
+algod(3694) : Success!
+algod(3694) : ⇨ http server started on 127.0.0.1:42547
+algod(3694) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42547. Press Ctrl-C to exit
+algod(3701) : No REST API Token found. Generated token: 09392c60ad772ac0f1fc4244a8d2fbe674e1d364916c8609b12f3a4086e722b3
+algod(3701) : No Admin REST API Token found. Generated token: d56b394192c1761209d5f4e867c2083abbf367b755fa5b150c69a2b0012a6ab8
+algod(3701) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Node/node.log
+algod(3701) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3701) : Initializing the Algorand node...
+algod(3701) : Success!
+algod(3701) : ⇨ http server started on 127.0.0.1:46421
+algod(3701) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46421. Press Ctrl-C to exit
+algod(3575) : Exiting on terminated
+algod(3568) : Exiting on terminated
+=== CONT TestAssetValidRounds
+--- PASS: TestAssetSend (34.51s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet2.0.3000000.partkey
+future 100000
+algod(3723) : No REST API Token found. Generated token: 7ab8df151581af28f266c667a833d26841b08e511d73b57057ca3da12e412a9a
+algod(3723) : No Admin REST API Token found. Generated token: 2385a69c299f062884bd472f01a6e68231ee4d8feaf3d6d5d5a0288694239bc9
+algod(3723) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Primary/node.log
+algod(3723) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3723) : Initializing the Algorand node...
+algod(3723) : Success!
+algod(3723) : ⇨ http server started on 127.0.0.1:8080
+algod(3723) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(3730) : No REST API Token found. Generated token: 911ef1829965d0c1877d02d1e9ae1e216e13a8a4285194885bf0da84ebfe2b74
+algod(3730) : No Admin REST API Token found. Generated token: 8e0d38847c4c0f19aab77e4c5059c2871b8d4013022e2fa81132594c390123a1
+algod(3730) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Node/node.log
+algod(3730) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3730) : Initializing the Algorand node...
+algod(3730) : Success!
+algod(3730) : ⇨ http server started on 127.0.0.1:42821
+algod(3730) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42821. Press Ctrl-C to exit
+algod(3730) : Exiting on terminated
+algod(3723) : Exiting on terminated
+--- PASS: TestAssetValidRounds (10.39s)
+=== CONT TestAccountInformationV2
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet1.0.3000000.partkey
+future 100000
+algod(3805) : No REST API Token found. Generated token: 48c890a0228705e1d0e8d379fa012a47a274369ff5ce97adcfbc05c124a382be
+algod(3805) : No Admin REST API Token found. Generated token: d0abe6f7ef9bd29558b634a9c3060f87e8775cace24b7da135f61a644e2d35aa
+algod(3805) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Primary/node.log
+algod(3805) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3805) : Initializing the Algorand node...
+algod(3805) : Success!
+algod(3805) : ⇨ http server started on 127.0.0.1:37935
+algod(3805) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37935. Press Ctrl-C to exit
+algod(3817) : No REST API Token found. Generated token: 3fc86480598a43f2aaaf5eb8b6e67ea80df2bf22ec914ba4b510f397d966a271
+algod(3817) : No Admin REST API Token found. Generated token: 25892b8667414fe9cf4b9edb5325abae3f2bdc72749fa8aa9815a5b6b2a14387
+algod(3817) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Node/node.log
+algod(3817) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(3817) : Initializing the Algorand node...
+algod(3817) : Success!
+algod(3817) : ⇨ http server started on 127.0.0.1:35409
+algod(3817) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35409. Press Ctrl-C to exit
+algod(3701) : Exiting on terminated
+algod(3694) : Exiting on terminated
+--- PASS: TestAssetGroupCreateSendDestroy (31.74s)
+algod(3805) : Exiting on terminated
+algod(3817) : Exiting on terminated
+--- PASS: TestAccountInformationV2 (28.95s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 270.274s
+? github.com/algorand/go-algorand/test/e2e-go/globals [no test files]
+=== RUN TestServerStartsStopsSuccessfully
+=== PAUSE TestServerStartsStopsSuccessfully
+=== RUN TestBadAuthFails
+=== PAUSE TestBadAuthFails
+=== RUN TestGoodAuthSucceeds
+=== PAUSE TestGoodAuthSucceeds
+=== RUN TestNonAbsSQLiteWalletConfigFails
+=== PAUSE TestNonAbsSQLiteWalletConfigFails
+=== RUN TestAbsSQLiteWalletConfigSucceeds
+=== PAUSE TestAbsSQLiteWalletConfigSucceeds
+=== RUN TestGenerateAndListKeys
+=== PAUSE TestGenerateAndListKeys
+=== RUN TestImportKey
+=== PAUSE TestImportKey
+=== RUN TestExportKey
+=== PAUSE TestExportKey
+=== RUN TestDeleteKey
+=== PAUSE TestDeleteKey
+=== RUN TestSignTransaction
+=== PAUSE TestSignTransaction
+=== RUN TestSignProgram
+=== PAUSE TestSignProgram
+=== RUN TestMasterKeyImportExport
+=== PAUSE TestMasterKeyImportExport
+=== RUN TestMasterKeyGeneratePastImportedKeys
+=== PAUSE TestMasterKeyGeneratePastImportedKeys
+=== RUN TestMultisigImportList
+=== PAUSE TestMultisigImportList
+=== RUN TestMultisigExportDelete
+=== PAUSE TestMultisigExportDelete
+=== RUN TestMultisigSign
+=== PAUSE TestMultisigSign
+=== RUN TestMultisigSignWithSigner
+=== PAUSE TestMultisigSignWithSigner
+=== RUN TestMultisigSignWithWrongSigner
+=== PAUSE TestMultisigSignWithWrongSigner
+=== RUN TestMultisigSignProgram
+=== PAUSE TestMultisigSignProgram
+=== RUN TestWalletCreation
+=== PAUSE TestWalletCreation
+=== RUN TestBlankWalletCreation
+=== PAUSE TestBlankWalletCreation
+=== RUN TestWalletRename
+=== PAUSE TestWalletRename
+=== RUN TestWalletSessionRelease
+=== PAUSE TestWalletSessionRelease
+=== RUN TestWalletSessionRenew
+=== PAUSE TestWalletSessionRenew
+=== RUN TestWalletSessionExpiry
+=== PAUSE TestWalletSessionExpiry
+=== CONT TestGoodAuthSucceeds
+=== CONT TestMasterKeyGeneratePastImportedKeys
+--- PASS: TestGoodAuthSucceeds (0.21s)
+=== CONT TestMultisigImportList
+--- PASS: TestMasterKeyGeneratePastImportedKeys (0.30s)
+=== CONT TestSignProgram
+=== CONT TestMasterKeyImportExport
+--- PASS: TestMultisigImportList (0.26s)
+=== CONT TestDeleteKey
+--- PASS: TestSignProgram (0.25s)
+--- PASS: TestMasterKeyImportExport (0.32s)
+=== CONT TestSignTransaction
+--- PASS: TestDeleteKey (0.25s)
+=== CONT TestImportKey
+--- PASS: TestSignTransaction (0.26s)
+=== CONT TestGenerateAndListKeys
+--- PASS: TestImportKey (0.26s)
+=== CONT TestAbsSQLiteWalletConfigSucceeds
+--- PASS: TestAbsSQLiteWalletConfigSucceeds (0.00s)
+=== CONT TestExportKey
+--- PASS: TestExportKey (0.26s)
+=== CONT TestNonAbsSQLiteWalletConfigFails
+--- PASS: TestNonAbsSQLiteWalletConfigFails (0.00s)
+=== CONT TestBadAuthFails
+--- PASS: TestGenerateAndListKeys (0.27s)
+=== CONT TestServerStartsStopsSuccessfully
+--- PASS: TestBadAuthFails (0.21s)
+=== CONT TestWalletCreation
+--- PASS: TestServerStartsStopsSuccessfully (0.21s)
+=== CONT TestWalletSessionExpiry
+=== CONT TestWalletSessionRenew
+--- PASS: TestWalletCreation (0.23s)
+--- PASS: TestWalletSessionExpiry (2.23s)
+=== CONT TestWalletRename
+=== CONT TestBlankWalletCreation
+--- PASS: TestWalletSessionRenew (2.23s)
+--- PASS: TestWalletRename (0.24s)
+=== CONT TestMultisigSignWithSigner
+=== CONT TestWalletSessionRelease
+--- PASS: TestBlankWalletCreation (0.23s)
+=== CONT TestMultisigSignWithWrongSigner
+--- PASS: TestMultisigSignWithSigner (0.25s)
+--- PASS: TestWalletSessionRelease (0.23s)
+=== CONT TestMultisigSignProgram
+--- PASS: TestMultisigSignWithWrongSigner (0.26s)
+=== CONT TestMultisigSign
+--- PASS: TestMultisigSignProgram (0.26s)
+=== CONT TestMultisigExportDelete
+--- PASS: TestMultisigSign (0.25s)
+--- PASS: TestMultisigExportDelete (0.25s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/kmd 5.048s
+testing: warning: no tests to run
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/perf 0.069s [no tests to run]
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(4283) : No REST API Token found. Generated token: c76798a34a35657df663a6c0b6a81cb39ff4a67359c32b63943204829b79d87c
+algod(4283) : No Admin REST API Token found. Generated token: 69e7af0bc9e5973a855e6d28859b7345a63236b05a091b84d74ce74cda33085b
+algod(4283) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Primary/node.log
+algod(4283) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4283) : Initializing the Algorand node...
+algod(4283) : Success!
+algod(4283) : ⇨ http server started on 127.0.0.1:38947
+algod(4283) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38947. Press Ctrl-C to exit
+algod(4291) : No REST API Token found. Generated token: 0e846834bba67419a34ace81b938e33f0c9dc012d23929aee2ff583e40be9277
+algod(4291) : No Admin REST API Token found. Generated token: a64db0006bbabb7cbfbba67e8d980ed9669998aee89f02fe99d4b7ac55773063
+algod(4291) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Node/node.log
+algod(4291) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4291) : Initializing the Algorand node...
+algod(4291) : Success!
+algod(4291) : ⇨ http server started on 127.0.0.1:34379
+algod(4291) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34379. Press Ctrl-C to exit
+=== RUN TestClientCanGetStatus
+--- PASS: TestClientCanGetStatus (0.01s)
+=== RUN TestClientCanGetStatusAfterBlock
+--- PASS: TestClientCanGetStatusAfterBlock (13.80s)
+=== RUN TestTransactionsByAddr
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet2.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(4336) : No REST API Token found. Generated token: 475c456823a93eec658a1cce2534bd90e3019eeaca13a6aded647d92a3475168
+algod(4336) : No Admin REST API Token found. Generated token: 36c927d2e19fc8c79c8adac754948e6f24b8f1c57f8039fe801578c35db0c365
+algod(4336) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Primary/node.log
+algod(4336) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4336) : Initializing the Algorand node...
+algod(4336) : Success!
+algod(4336) : ⇨ http server started on 127.0.0.1:40873
+algod(4336) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40873. Press Ctrl-C to exit
+algod(4343) : No REST API Token found. Generated token: f7c522f3ef7668ffde8df7fae80edf803bc5e621db32674c6f752d209a252161
+algod(4343) : No Admin REST API Token found. Generated token: 70335dc94d7f3adcfd32e57ece56514b3637015e9d6e9ba02a3db6157b9a2527
+algod(4343) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Node/node.log
+algod(4343) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4343) : Initializing the Algorand node...
+algod(4343) : Success!
+algod(4343) : ⇨ http server started on 127.0.0.1:33153
+algod(4343) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33153. Press Ctrl-C to exit
+ restClient_test.go:230: rnd[2] created txn TJFY6J2BGXMNQPFBAZEZWCGZPSA2HZFTK3JI3RQ32Y4ZR354KERA
+ restClient_test.go:237: rnd 4
+algod(4336) : Exiting on terminated
+algod(4343) : Exiting on terminated
+--- PASS: TestTransactionsByAddr (17.97s)
+=== RUN TestClientCanGetVersion
+--- PASS: TestClientCanGetVersion (0.00s)
+=== RUN TestClientCanGetSuggestedFee
+--- PASS: TestClientCanGetSuggestedFee (0.02s)
+=== RUN TestClientCanGetMinTxnFee
+--- PASS: TestClientCanGetMinTxnFee (0.01s)
+=== RUN TestClientCanGetBlockInfo
+--- PASS: TestClientCanGetBlockInfo (0.03s)
+=== RUN TestClientRejectsBadFromAddressWhenSending
+--- PASS: TestClientRejectsBadFromAddressWhenSending (0.01s)
+=== RUN TestClientRejectsBadToAddressWhenSending
+--- PASS: TestClientRejectsBadToAddressWhenSending (0.02s)
+=== RUN TestClientRejectsMutatedFromAddressWhenSending
+--- PASS: TestClientRejectsMutatedFromAddressWhenSending (0.02s)
+=== RUN TestClientRejectsMutatedToAddressWhenSending
+--- PASS: TestClientRejectsMutatedToAddressWhenSending (0.01s)
+=== RUN TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey
+--- PASS: TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey (0.01s)
+=== RUN TestClientOversizedNote
+--- PASS: TestClientOversizedNote (0.02s)
+=== RUN TestClientCanSendAndGetNote
+--- PASS: TestClientCanSendAndGetNote (7.32s)
+=== RUN TestClientCanGetTransactionStatus
+ restClient_test.go:424: {
+ "amt": 100000,
+ "fee": 10000,
+ "fv": 11,
+ "gen": "test-v1",
+ "gh": "TFra7RvB7ra3ON9ZblZuqoCHu6gWiwpVHfGr0eN1C9s=",
+ "lv": 1011,
+ "rcv": "YSCN4ZDAXRU2WO2W2WNDCWBVRQKZETY2QF2YTI2LGBLPVUBNZZLBXOEKQI",
+ "snd": "3BWJVN4HGXGNLN2OUBWGJINILE575VG4F6BTHE4NWOIUEXK3YOCHALOKPA",
+ "type": "pay"
+ }
+ restClient_test.go:426: 6NQGEVFKIYCCVKQKGICVKLHCH24ZYTDVWHBE553W7DF53BPYE4OQ
+--- PASS: TestClientCanGetTransactionStatus (8.23s)
+=== RUN TestAccountBalance
+--- PASS: TestAccountBalance (8.33s)
+=== RUN TestAccountParticipationInfo
+--- PASS: TestAccountParticipationInfo (8.33s)
+=== RUN TestSupply
+--- PASS: TestSupply (0.00s)
+=== RUN TestClientCanGetGoRoutines
+--- PASS: TestClientCanGetGoRoutines (0.01s)
+=== RUN TestSendingTooMuchFails
+ restClient_test.go:557: HTTP 400 Bad Request: TransactionPool.Remember: transaction HV3NYJPDGYHZH6LH4IBB54BAACA2F45B552S4JQA4CD56SD5EPKA: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:90000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {100100})
+ restClient_test.go:562: HTTP 400 Bad Request: TransactionPool.Remember: transaction IQEXYSWOWK6O6NNE75AEV6VLPBZFKPKES4EYBTSPCG6MCOVDASQQ: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:90000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {18446744073709551615})
+ restClient_test.go:567: HTTP 400 Bad Request: TransactionPool.Remember: transaction EARCABJLCKMRRBEZW2DGRVPBRKVS6D6TILGS3C6ZZEZWAE4E2AQQ: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:100000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {100100})
+ restClient_test.go:572: HTTP 400 Bad Request: TransactionPool.Remember: transaction W64ZQNQU4QF4KGG562ZHVM4KY2C5IOMJOU6ZGOKL2MC7USAAQWIA: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:100000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {18446744073709551615})
+--- PASS: TestSendingTooMuchFails (0.07s)
+=== RUN TestSendingFromEmptyAccountFails
+--- PASS: TestSendingFromEmptyAccountFails (0.04s)
+=== RUN TestSendingTooLittleToEmptyAccountFails
+--- PASS: TestSendingTooLittleToEmptyAccountFails (0.06s)
+=== RUN TestSendingLowFeeFails
+ restClient_test.go:663: HTTP 400 Bad Request: transaction {_struct:{} Sig:[216 70 241 73 25 40 190 87 19 12 178 201 6 82 78 8 43 150 231 41 157 153 251 241 70 143 175 59 174 26 68 48 198 95 50 97 16 28 204 12 154 168 200 167 230 134 48 209 216 242 56 31 96 75 79 204 81 249 232 82 170 48 118 13] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Lsig:{_struct:{} Logic:[] Sig:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Args:[]} Txn:{_struct:{} Type:pay Header:{_struct:{} Sender:3BWJVN4HGXGNLN2OUBWGJINILE575VG4F6BTHE4NWOIUEXK3YOCHALOKPA Fee:{Raw:1} FirstValid:17 LastValid:1017 Note:[] GenesisID:test-v1 GenesisHash:JRNNV3I3YHXLNNZY35MW4VTOVKAIPO5IC2FQUVI56GV5DY3VBPNQ Group:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA Lease:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] RekeyTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} KeyregTxnFields:{_struct:{} VotePK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionPK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirst:0 VoteLast:0 VoteKeyDilution:0 Nonparticipation:false} PaymentTxnFields:{_struct:{} Receiver:P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA Amount:{Raw:100000} CloseRemainderTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetConfigTxnFields:{_struct:{} ConfigAsset:0 AssetParams:{_struct:{} Total:0 Decimals:0 DefaultFrozen:false UnitName: AssetName: URL: MetadataHash:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Manager:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Reserve:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Freeze:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Clawback:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ}} AssetTransferTxnFields:{_struct:{} XferAsset:0 AssetAmount:0 
AssetSender:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetReceiver:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetCloseTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetFreezeTxnFields:{_struct:{} FreezeAccount:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ FreezeAsset:0 AssetFrozen:false} ApplicationCallTxnFields:{_struct:{} ApplicationID:0 OnCompletion:NoOpOC ApplicationArgs:[] Accounts:[] ForeignApps:[] ForeignAssets:[] LocalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} GlobalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} ApprovalProgram:[] ClearStateProgram:[]} CompactCertTxnFields:{_struct:{} CertRound:0 CertType:0 Cert:{_struct:{} SigCommit:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA SignedWeight:0 SigProofs:[] PartProofs:[] Reveals:map[]}}} AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} invalid : transaction had fee 1, which is less than the minimum 1000
+ restClient_test.go:669: HTTP 400 Bad Request: transaction {_struct:{} Sig:[65 160 77 104 227 30 37 105 31 244 54 175 138 167 38 34 129 192 253 108 205 101 142 23 42 251 126 196 80 218 31 185 178 104 206 9 2 4 120 11 111 70 174 129 124 145 164 237 80 203 76 44 155 138 146 212 44 66 121 157 21 188 47 0] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Lsig:{_struct:{} Logic:[] Sig:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Args:[]} Txn:{_struct:{} Type:pay Header:{_struct:{} Sender:3BWJVN4HGXGNLN2OUBWGJINILE575VG4F6BTHE4NWOIUEXK3YOCHALOKPA Fee:{Raw:0} FirstValid:17 LastValid:1017 Note:[] GenesisID:test-v1 GenesisHash:JRNNV3I3YHXLNNZY35MW4VTOVKAIPO5IC2FQUVI56GV5DY3VBPNQ Group:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA Lease:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] RekeyTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} KeyregTxnFields:{_struct:{} VotePK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionPK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirst:0 VoteLast:0 VoteKeyDilution:0 Nonparticipation:false} PaymentTxnFields:{_struct:{} Receiver:P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA Amount:{Raw:100000} CloseRemainderTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetConfigTxnFields:{_struct:{} ConfigAsset:0 AssetParams:{_struct:{} Total:0 Decimals:0 DefaultFrozen:false UnitName: AssetName: URL: MetadataHash:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Manager:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Reserve:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Freeze:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Clawback:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ}} AssetTransferTxnFields:{_struct:{} XferAsset:0 
AssetAmount:0 AssetSender:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetReceiver:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetCloseTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetFreezeTxnFields:{_struct:{} FreezeAccount:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ FreezeAsset:0 AssetFrozen:false} ApplicationCallTxnFields:{_struct:{} ApplicationID:0 OnCompletion:NoOpOC ApplicationArgs:[] Accounts:[] ForeignApps:[] ForeignAssets:[] LocalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} GlobalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} ApprovalProgram:[] ClearStateProgram:[]} CompactCertTxnFields:{_struct:{} CertRound:0 CertType:0 Cert:{_struct:{} SigCommit:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA SignedWeight:0 SigProofs:[] PartProofs:[] Reveals:map[]}}} AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} invalid : transaction had fee 0, which is less than the minimum 1000
+--- PASS: TestSendingLowFeeFails (0.07s)
+=== RUN TestSendingNotClosingAccountFails
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet2.0.3000000.partkey
+algod(4606) : No REST API Token found. Generated token: b3f22a106eb6122ba6ebb7ad5f5cad4ab76e4ff00be9cea089fa552c85d5cbd3
+algod(4606) : No Admin REST API Token found. Generated token: 7b59e843d468b91e7501755971d61821a89b7a1c3a38f9082d32aef207ec1ee1
+algod(4606) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Primary/node.log
+algod(4606) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4606) : Initializing the Algorand node...
+algod(4606) : Success!
+algod(4606) : ⇨ http server started on 127.0.0.1:37903
+algod(4606) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37903. Press Ctrl-C to exit
+algod(4613) : No REST API Token found. Generated token: 3629a039e9d20f5fa4a7c402082cad5a52528c011fcd4d6a398790b6d7034682
+algod(4613) : No Admin REST API Token found. Generated token: 588c2499a87d2439f094806ee562af55ab8b7e6020704cd6e7962c601c9233e9
+algod(4613) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Node/node.log
+algod(4613) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4613) : Initializing the Algorand node...
+algod(4613) : Success!
+algod(4613) : ⇨ http server started on 127.0.0.1:42947
+algod(4613) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42947. Press Ctrl-C to exit
+algod(4613) : Exiting on terminated
+algod(4606) : Exiting on terminated
+--- PASS: TestSendingNotClosingAccountFails (3.89s)
+=== RUN TestClientCanGetPendingTransactions
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(4637) : No REST API Token found. Generated token: 5debbf720dd845682f9b78137216b0adf9fce8379e1d127f945b20af3c559d4a
+algod(4637) : No Admin REST API Token found. Generated token: 0be670daf19d8bd88830c8817b35d44eb823ca048359f57873b24a06b8eea831
+algod(4637) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Primary/node.log
+algod(4637) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4637) : Initializing the Algorand node...
+algod(4637) : Success!
+algod(4637) : ⇨ http server started on 127.0.0.1:42439
+algod(4637) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42439. Press Ctrl-C to exit
+algod(4645) : No REST API Token found. Generated token: 13b1a26df08074ad74dfdb621d686293190ee02664685de45c42b743cf1d50c2
+algod(4645) : No Admin REST API Token found. Generated token: df4f852af3e5dab9e157593c2a6e3854470213262853dc1926a89d11db12a3ed
+algod(4645) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Node/node.log
+algod(4645) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4645) : Initializing the Algorand node...
+algod(4645) : Success!
+algod(4645) : ⇨ http server started on 127.0.0.1:39457
+algod(4645) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39457. Press Ctrl-C to exit
+algod(4645) : Exiting on terminated
+algod(4637) : Exiting on terminated
+--- PASS: TestClientCanGetPendingTransactions (8.20s)
+=== RUN TestClientTruncatesPendingTransactions
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(4680) : No REST API Token found. Generated token: 0a26701d5d1dcbc52f2a51af23e7b73c94b27fbaf878929d169b5b6d0520564e
+algod(4680) : No Admin REST API Token found. Generated token: 9a11b167638a5629daf54bbccb6aa5c948ea18010296339b153e461553c4bb38
+algod(4680) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Primary/node.log
+algod(4680) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4680) : Initializing the Algorand node...
+algod(4680) : Success!
+algod(4680) : ⇨ http server started on 127.0.0.1:34217
+algod(4680) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34217. Press Ctrl-C to exit
+algod(4687) : No REST API Token found. Generated token: 3fd8152e288635ae00d5ee39ac4337c2cd74fd6b807ba69ac83d65f070e5be08
+algod(4687) : No Admin REST API Token found. Generated token: bddf2d08ad914b1e5d7453a6d396aeb53a6092b32d26d482338f369f59c1f61f
+algod(4687) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Node/node.log
+algod(4687) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4687) : Initializing the Algorand node...
+algod(4687) : Success!
+algod(4687) : ⇨ http server started on 127.0.0.1:39073
+algod(4687) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39073. Press Ctrl-C to exit
+algod(4687) : Exiting on terminated
+algod(4680) : Exiting on terminated
+--- PASS: TestClientTruncatesPendingTransactions (9.60s)
+=== RUN TestClientPrioritizesPendingTransactions
+ restClient_test.go:785: new FIFO pool does not have prioritization
+--- SKIP: TestClientPrioritizesPendingTransactions (0.00s)
+PASS
+algod(4291) : Exiting on terminated
+algod(4283) : Exiting on terminated
+ok github.com/algorand/go-algorand/test/e2e-go/restAPI 89.426s
+=== RUN TestManyAccountsCanGoOnline
+=== PAUSE TestManyAccountsCanGoOnline
+=== CONT TestManyAccountsCanGoOnline
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet1.0.3000000.partkey
+https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000
+algod(4769) : No REST API Token found. Generated token: 88327cd4affa9699a330e54d6579146ad1d6768ce66e64d6ed6b437d1df97981
+algod(4769) : No Admin REST API Token found. Generated token: 9dd29f02c9ea2430d1fb553c8a36118036ba6818f66b20cff7858159a68f26ba
+algod(4769) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Primary/node.log
+algod(4769) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4769) : Initializing the Algorand node...
+algod(4769) : Success!
+algod(4769) : ⇨ http server started on 127.0.0.1:40167
+algod(4769) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40167. Press Ctrl-C to exit
+algod(4776) : No REST API Token found. Generated token: f2073e4dc776f7d347590988f8e49a70d7f6b46f6b5a56984e55e1c6e266dd3f
+algod(4776) : No Admin REST API Token found. Generated token: 99a4d7d7138b18d603444543c99f786709239a5621114daf28ec0a7f47bab856
+algod(4776) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Node/node.log
+algod(4776) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4776) : Initializing the Algorand node...
+algod(4776) : Success!
+algod(4776) : ⇨ http server started on 127.0.0.1:36175
+algod(4776) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36175. Press Ctrl-C to exit
+algod(4769) : Exiting on terminated
+algod(4776) : Exiting on terminated
+--- PASS: TestManyAccountsCanGoOnline (21.76s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/stress/transactions 21.821s
+=== RUN TestApplicationsUpgradeOverREST
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Wallet2.0.3000000.partkey
+test-unupgraded-protocol 100000
+algod(4864) : No REST API Token found. Generated token: 6261b8e000f0b0071cb84915d8c055a327148502db2ebf064797b5d3db704c87
+algod(4864) : No Admin REST API Token found. Generated token: 0d60fa1560cb641e7586ac81dce034942c8361901b4ab7f287866ec21c3fe612
+algod(4864) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Primary/node.log
+algod(4864) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4864) : Initializing the Algorand node...
+algod(4864) : Success!
+algod(4864) : ⇨ http server started on 127.0.0.1:39413
+algod(4864) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39413. Press Ctrl-C to exit
+algod(4872) : No REST API Token found. Generated token: e2f05bcbff87d14ce8accf989be6326f4b80f9c22cc610b2e96c166b44b18530
+algod(4872) : No Admin REST API Token found. Generated token: f1119a50b7179a73a2dd8597d9effe86ee1fbd48dba87279921597b732d95626
+algod(4872) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Node/node.log
+algod(4872) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4872) : Initializing the Algorand node...
+algod(4872) : Success!
+algod(4872) : ⇨ http server started on 127.0.0.1:38505
+algod(4872) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38505. Press Ctrl-C to exit
+algod(4864) : Exiting on terminated
+algod(4872) : Exiting on terminated
+--- PASS: TestApplicationsUpgradeOverREST (69.75s)
+=== RUN TestApplicationsUpgradeOverGossip
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Wallet2.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Wallet2.0.3000000.partkey
+test-unupgraded-protocol 100000
+algod(4973) : No REST API Token found. Generated token: fa0d13dd1788cb933faa4caeb2da99d0d0508797949771a15fe0ed22b13741d7
+algod(4973) : No Admin REST API Token found. Generated token: 06dab83008eb02d2b3294a34c4e9ac5692c8c175f017f6f4e01295a51083bccf
+algod(4973) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Primary/node.log
+algod(4973) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4973) : Initializing the Algorand node...
+algod(4973) : Success!
+algod(4973) : ⇨ http server started on 127.0.0.1:8080
+algod(4973) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(4980) : No REST API Token found. Generated token: a3dbdae529fa78b662bafc0936cabdedfe13f026df26abd5f6ef5f5cf588d5e8
+algod(4980) : No Admin REST API Token found. Generated token: f8e4ea07a936339a5302f06ab62d56c69f95119a56ec6dca436cc633e3a0bf4b
+algod(4980) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Node/node.log
+algod(4980) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(4980) : Initializing the Algorand node...
+algod(4980) : Success!
+algod(4980) : ⇨ http server started on 127.0.0.1:46847
+algod(4980) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46847. Press Ctrl-C to exit
+algod(4973) : Exiting on terminated
+algod(4980) : Exiting on terminated
+--- PASS: TestApplicationsUpgradeOverGossip (72.80s)
+=== RUN TestRekeyUpgrade
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Wallet2.0.3000000.partkey
+test-unupgraded-protocol 100000
+algod(5011) : No REST API Token found. Generated token: de8e10c1f94daa7f8e8cd7693717d0f765cf78ed5dff60e7ec7766ecbbdabb16
+algod(5011) : No Admin REST API Token found. Generated token: 11b074b9d81ca6210f3111e3be922949114f81955aaf94ec04aa58ed7f8cec25
+algod(5011) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Primary/node.log
+algod(5011) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5011) : Initializing the Algorand node...
+algod(5011) : Success!
+algod(5011) : ⇨ http server started on 127.0.0.1:8080
+algod(5011) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(5018) : No REST API Token found. Generated token: 24e16935b6aa9de4f99f5993c7898ed46ecc48c3c3e591ef9e0adbdd0f0a4b36
+algod(5018) : No Admin REST API Token found. Generated token: aa0092539af074a6f53b2f51e49e1223ddc486f57f05525c25cf3ff323f10338
+algod(5018) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Node/node.log
+algod(5018) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5018) : Initializing the Algorand node...
+algod(5018) : Success!
+algod(5018) : ⇨ http server started on 127.0.0.1:46697
+algod(5018) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46697. Press Ctrl-C to exit
+algod(5011) : Exiting on terminated
+algod(5018) : Exiting on terminated
+--- PASS: TestRekeyUpgrade (53.44s)
+=== RUN TestAccountsCanSendMoneyAcrossUpgradeV15toV16
+=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV15toV16
+=== RUN TestAccountsCanSendMoneyAcrossUpgradeV21toV22
+=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV21toV22
+=== RUN TestAccountsCanSendMoneyAcrossUpgradeV22toV23
+=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV22toV23
+=== RUN TestAccountsCanSendMoneyAcrossUpgradeV23toV24
+=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV23toV24
+=== RUN TestAccountsCanSendMoneyAcrossUpgradeV24toV25
+=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV24toV25
+=== CONT TestAccountsCanSendMoneyAcrossUpgradeV15toV16
+=== CONT TestAccountsCanSendMoneyAcrossUpgradeV22toV23
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet1.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet1.0.3000000.partkey
+test-fast-upgrade-https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622 100000
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet1.0.3000000.partkey
+test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/57016b942f6d97e6d4c0688b373bb0a2fc85a1a2 100000
+algod(5073) : No REST API Token found. Generated token: dbd55a4c1f6b1b47a374d34ae807450e71609219eb752a148c75b9b2d559e69d
+algod(5073) : No Admin REST API Token found. Generated token: e7437d51b75133f1f9e1704e9bd5df61c5d0ad442b33adf086308c6248cb5610
+algod(5073) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Primary/node.log
+algod(5079) : No REST API Token found. Generated token: 9972f22898ef28ba5154338bce28992cfe9114122591220138cfc4e2d731e5c5
+algod(5079) : No Admin REST API Token found. Generated token: df461d6e095fec2dbbbace10bcb4c6ceefd9d923f2a26b16abc1480939a139a1
+algod(5079) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Primary/node.log
+algod(5073) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5073) : Initializing the Algorand node...
+algod(5079) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5079) : Initializing the Algorand node...
+algod(5079) : Success!
+algod(5073) : Success!
+algod(5073) : ⇨ http server started on 127.0.0.1:8080
+algod(5073) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(5079) : ⇨ http server started on 127.0.0.1:41335
+algod(5079) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41335. Press Ctrl-C to exit
+algod(5087) : No REST API Token found. Generated token: 410317c6b612b3934f9319c2d10e01663b28ce707f5800b41b59dd0f707f66d6
+algod(5087) : No Admin REST API Token found. Generated token: 584a7a6eec2558f9f004c865d3ca315cd79caa39af1712cb0d771c8fe1120103
+algod(5087) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Node/node.log
+algod(5092) : No REST API Token found. Generated token: eae2fee1b135b3ac21cbcb3d706aa0aae337e1f6fbecc8ad41513ce96a958e78
+algod(5092) : No Admin REST API Token found. Generated token: 1291bcfa197105c5b063eb0238d7d0091d1de4aac8fe967167bafc4c6dfc60d3
+algod(5092) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Node/node.log
+algod(5092) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5092) : Initializing the Algorand node...
+algod(5087) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5087) : Initializing the Algorand node...
+algod(5092) : Success!
+algod(5092) : ⇨ http server started on 127.0.0.1:42041
+algod(5092) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42041. Press Ctrl-C to exit
+algod(5087) : Success!
+algod(5087) : ⇨ http server started on 127.0.0.1:44361
+algod(5087) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44361. Press Ctrl-C to exit
+algod(5079) : Exiting on terminated
+algod(5092) : Exiting on terminated
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV15toV16 (63.93s)
+=== CONT TestAccountsCanSendMoneyAcrossUpgradeV21toV22
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet1.0.3000000.partkey
+test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/8096e2df2da75c3339986317f9abe69d4fa86b4b 100000
+algod(5160) : No REST API Token found. Generated token: cce645e6dde952b23ed9d4a9cea0ea30df4dc63be8f11c1ce62e2a5eb58a9c19
+algod(5160) : No Admin REST API Token found. Generated token: be5557090ce2d219d8213874f99d2a819e5f5b0b05f52aa4b6d08ded58db5968
+algod(5160) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Primary/node.log
+algod(5160) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5160) : Initializing the Algorand node...
+algod(5160) : Success!
+algod(5160) : ⇨ http server started on 127.0.0.1:43497
+algod(5160) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43497. Press Ctrl-C to exit
+algod(5168) : No REST API Token found. Generated token: 01a3a9cf7e402d3d8aa269f10d50dfc9bf489ab40ddfd9ae310d54e66df22aad
+algod(5168) : No Admin REST API Token found. Generated token: 0a9403999ac6c305759f7a5cefaaac7bb638864b290f31d70faab927dd5723d3
+algod(5168) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Node/node.log
+algod(5168) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5168) : Initializing the Algorand node...
+algod(5168) : Success!
+algod(5168) : ⇨ http server started on 127.0.0.1:43463
+algod(5168) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43463. Press Ctrl-C to exit
+algod(5073) : Exiting on terminated
+algod(5087) : Exiting on terminated
+=== CONT TestAccountsCanSendMoneyAcrossUpgradeV24toV25
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV22toV23 (80.36s)
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet2.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet1.0.3000000.partkey
+test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/3a83c4c743f8b17adfd73944b4319c25722a6782 100000
+algod(5229) : No REST API Token found. Generated token: 49bf9511e15ec96766017c74abcde66e975ad057ddcf114d8ca92ad871bccf04
+algod(5229) : No Admin REST API Token found. Generated token: c4cd35cf2024fdc64c2111be09b1e840730cfdcea2564e3f375af5106848c7f9
+algod(5229) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Primary/node.log
+algod(5229) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5229) : Initializing the Algorand node...
+algod(5229) : Success!
+algod(5229) : ⇨ http server started on 127.0.0.1:8080
+algod(5229) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit
+algod(5236) : No REST API Token found. Generated token: 4f02a6b1c63caafcfd982b1a71ba17e9b66ab6fa78a4afc8115740467769a90d
+algod(5236) : No Admin REST API Token found. Generated token: 5b52529e886690ea5999af26697d0d6c38ed53e97c315c56da5e4c4cd3165465
+algod(5236) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Node/node.log
+algod(5236) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5236) : Initializing the Algorand node...
+algod(5236) : Success!
+algod(5236) : ⇨ http server started on 127.0.0.1:35143
+algod(5236) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35143. Press Ctrl-C to exit
+algod(5160) : Exiting on terminated
+algod(5168) : Exiting on terminated
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV21toV22 (62.73s)
+=== CONT TestAccountsCanSendMoneyAcrossUpgradeV23toV24
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet2.rootkey
+Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet1.rootkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet1.0.3000000.partkey
+Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet2.0.3000000.partkey
+test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/e5f565421d720c6f75cdd186f7098495caf9101f 100000
+algod(5272) : No REST API Token found. Generated token: 8638c98dbae7c4121d822b1031ac8545eb1751fd2846222d1dffc0253895de29
+algod(5272) : No Admin REST API Token found. Generated token: cd4d700c443ab16a438276edbc1bfdb8b9cbf5302ac9a7c8000a0749cb4123f3
+algod(5272) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Primary/node.log
+algod(5272) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5272) : Initializing the Algorand node...
+algod(5272) : Success!
+algod(5272) : ⇨ http server started on 127.0.0.1:39637
+algod(5272) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39637. Press Ctrl-C to exit
+algod(5279) : No REST API Token found. Generated token: 53a88690387e7aaeea3de1dc1ed525337114f994a34ba14f02cdac1b64596fb6
+algod(5279) : No Admin REST API Token found. Generated token: 8cd9a4bdb35fbeedbb59d904fb486dd4374efe4d5eb5cebadea3b369e369c44d
+algod(5279) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Node/node.log
+algod(5279) : Deadlock detection is set to: enabled (Default state is 'enable')
+algod(5279) : Initializing the Algorand node...
+algod(5279) : Success!
+algod(5279) : ⇨ http server started on 127.0.0.1:42017
+algod(5279) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42017. Press Ctrl-C to exit
+algod(5236) : Exiting on terminated
+algod(5229) : Exiting on terminated
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV24toV25 (80.65s)
+algod(5272) : Exiting on terminated
+algod(5279) : Exiting on terminated
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV23toV24 (63.01s)
+PASS
+ok github.com/algorand/go-algorand/test/e2e-go/upgrades 385.737s
+FAIL
+
diff --git a/debug/logfilter/example6.out.expected b/debug/logfilter/example6.out.expected
new file mode 100644
index 000000000..b3d517124
--- /dev/null
+++ b/debug/logfilter/example6.out.expected
@@ -0,0 +1,385 @@
+--- PASS: TestAlgodLogsToFile (1.25s)
+--- PASS: TestNodeControllerCleanup (5.44s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 5.507s
+--- PASS: TestAlgodWithExpect (0.07s)
+ --- PASS: TestAlgodWithExpect/algodTelemetryLocationTest.exp (0.07s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algod/expect 0.150s
+--- PASS: TestAlgohWithExpect (213.98s)
+ --- PASS: TestAlgohWithExpect/algohTimeoutTest.exp (213.98s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/algoh/expect 214.091s
+--- PASS: TestAccountNew (1.75s)
+--- PASS: TestAccountNewDuplicateFails (0.30s)
+--- PASS: TestAccountRename (0.85s)
+--- PASS: TestAccountMultipleImportRootKey (0.49s)
+--- PASS: TestClerkSendNoteEncoding (18.65s)
+--- PASS: TestGoalNodeCleanup (0.44s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/goal 26.354s
+
+--- FAIL: TestGoalWithExpect (1538.34s)
+ --- PASS: TestGoalWithExpect/basicGoalTest.exp (37.79s)
+ --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (75.84s)
+ --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (4.91s)
+ --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.19s)
+ --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (8.93s)
+ --- PASS: TestGoalWithExpect/reportTest.exp (6.20s)
+ --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (21.86s)
+ --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.01s)
+ --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (67.63s)
+ --- PASS: TestGoalWithExpect/goalNodeTest.exp (15.93s)
+ --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (59.13s)
+ --- PASS: TestGoalWithExpect/goalAccountTest.exp (24.57s)
+ --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.98s)
+ --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (37.95s)
+ --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.16s)
+ --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.89s)
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (12.31s)
+ --- PASS: TestGoalWithExpect/goalAssetTest.exp (41.96s)
+ --- PASS: TestGoalWithExpect/ledgerTest.exp (9.53s)
+
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1
+ stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod
+ TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ network create test_net_expect_1617230151
+ spawn goal network create --network test_net_expect_1617230151 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.rootkey
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.rootkey
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey
+ future 100000
+ Network test_net_expect_1617230151 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ network start test_net_expect_1617230151
+ spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ network status test_net_expect_1617230151
+ spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+ [Primary]
+ Last committed block: 0
+ Time since last block: 0.0s
+ Sync Time: 0.0s
+ Last consensus protocol: future
+ Next consensus protocol: future
+ Round for next consensus protocol: 1
+ Next consensus protocol supported: true
+
+ [Node]
+ Last committed block: 0
+ Time since last block: 0.0s
+ Sync Time: 0.6s
+ Last consensus protocol: future
+ Next consensus protocol: future
+ Round for next consensus protocol: 1
+ Next consensus protocol supported: true
+
+ StartNetwork complete
+ Primary node address is: 127.0.0.1:43613
+ Primary Node Address: 127.0.0.1:43613
+ spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ [online] W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE 5000000000000000 microAlgos
+ Account Address: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE Balance: 5000000000000000
+ spawn goal account balance -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ 5000000000000000 microAlgos
+ Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Balance: 5000000000000000
+ Primary Account Balance: 5000000000000000
+ spawn goal account rewards -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ 0 microAlgos
+ Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Rewards: 0
+ Primary Account Rewards: 0
+ spawn goal wallet new Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Please choose a password for wallet 'Wallet_1_1617230151':
+ Please confirm the password:
+ Creating wallet...
+ Created wallet 'Wallet_1_1617230151'
+ Your new wallet has a backup phrase that can be used for recovery.
+ Keeping this backup phrase safe is extremely important.
+ Would you like to see it now? (Y/n): y
+ Your backup phrase is printed below.
+ Keep this information safe -- never share it with anyone!
+
+ One or more non-printable characters were ommited from the subsequent line:
+ [32mattract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret[0mWALLET_1_PASSPHRASE: attract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ ##################################################
+ Wallet: Wallet_1_1617230151
+ ID: 12dd4a15929ae17827788883ca77479d
+ ##################################################
+ Wallet: unencrypted-default-wallet
+ ID: ec9a33b376e4635705e1339deb6e799b
+ spawn goal account new -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Please enter the password for wallet 'Wallet_1_1617230151':
+ Created new account with address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ
+ Account Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ
+ spawn goal account list -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ [offline] Unnamed-0 GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ 0 microAlgosAccount Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ
+ spawn goal wallet new Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Please choose a password for wallet 'Wallet_2_1617230151':
+ Please confirm the password:
+ Creating wallet...
+ Created wallet 'Wallet_2_1617230151'
+ Your new wallet has a backup phrase that can be used for recovery.
+ Keeping this backup phrase safe is extremely important.
+ Would you like to see it now? (Y/n): y
+ Your backup phrase is printed below.
+ Keep this information safe -- never share it with anyone!
+
+ One or more non-printable characters were ommited from the subsequent line:
+ [32mcasual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump[0m
+ WALLET_2_PASSPHRASE: casual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump
+ spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ ##################################################
+ Wallet: Wallet_1_1617230151
+ ID: 12dd4a15929ae17827788883ca77479d
+ ##################################################
+ Wallet: Wallet_2_1617230151
+ ID: 2edbca9e4d78d43556f46cc991415da5
+ spawn goal account new -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Please enter the password for wallet 'Wallet_2_1617230151': 12345678
+
+ Created new account with address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UAAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA
+ spawn goal account list -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ [offline] Unnamed-1 F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA 0 microAlgosAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ, transaction ID: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA. Fee set to 1000
+ Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA still pending as of round 8
+ Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA committed in round 10
+ TRANSACTION_ID 1: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA
+ spawn goal account balance -a GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ 1000000000 microAlgos
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA, transaction ID: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ. Fee set to 1000
+ Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ still pending as of round 11
+ Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ committed in round 13
+ TRANSACTION_ID 2: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ
+ spawn goal account balance -a F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ 1000000000 microAlgos
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work
+
+ writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal'
+ reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal
+ #pragma version 2
+ int 1
+
+ spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal
+ /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+ spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA. Fee set to 1000
+ Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA still pending as of round 14
+ Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA committed in round 16
+ TRANSACTION_ID_APP: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA
+ spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ 1000000000 microAlgos
+ Account Balance: 1000000000
+ Account balance OK: 1000000000
+ calling app create
+ calling goal app create
+ spawn goal app create --creator W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/
+ Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A)
+ Issued transaction from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, txid JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA (fee 1000)
+ Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA still pending as of round 19
+ Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA committed in round 21
+ Created app with app index 4App ID 4
+ spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617230151 -i unsginedtransaction1.tx -o sginedtransaction1.tx
+ Aborting with Error: Timed out signing transaction
+ GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ GLOBAL_NETWORK_NAME test_net_expect_1617230151
+ Stopping network: test_net_expect_1617230151
+ spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root
+
+
+ stderr:
+ --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (115.77s)
+ --- PASS: TestGoalWithExpect/testInfraTest.exp (3.41s)
+ --- PASS: TestGoalWithExpect/createWalletTest.exp (243.76s)
+ --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (6.89s)
+
+ expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ expectFixture.go:157: err running 'pingpongTest.exp': exit status 1
+ stdout: starting pinpongTest
+ TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod
+ TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata
+ network create test_net_expect_1617230521
+ spawn goal network create --network test_net_expect_1617230521 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.rootkey
+ Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.rootkey
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.0.3000000.partkey
+ Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.0.3000000.partkey
+ future 100000
+ Network test_net_expect_1617230521 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+ network start test_net_expect_1617230521
+ spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+ Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+ network status test_net_expect_1617230521
+ spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ [Primary]
+ Last committed block: 0
+ Time since last block: 0.0s
+ Sync Time: 0.0s
+ Last consensus protocol: future
+ Next consensus protocol: future
+ Round for next consensus protocol: 1
+ Next consensus protocol supported: true
+
+ [Node]
+ Last committed block: 0
+ Time since last block: 0.0s
+ Sync Time: 0.7s
+ Last consensus protocol: future
+ Next consensus protocol: future
+ Round for next consensus protocol: 1
+ Next consensus protocol supported: true
+
+ StartNetwork complete
+ Primary node address is: 127.0.0.1:37299
+ Primary Node Address: 127.0.0.1:37299
+ spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/
+ [online] 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 5000000000000000 microAlgos
+ Account Address: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U Balance: 5000000000000000
+ spawn goal account balance -w unencrypted-default-wallet -a 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/
+ 5000000000000000 microAlgos
+ Wallet: unencrypted-default-wallet, Account: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U, Balance: 5000000000000000
+ Primary Account Balance: 5000000000000000
+ node status waiting for Round 1
+ spawn node status
+ node status check complete, current round is 0
+ Current Round: '0' is less than wait for round: '1'
+ sleep time 0
+ spawn node status
+ node status check complete, current round is 0
+ Current Round: '0' is less than wait for round: '1'
+ sleep time 1
+ spawn node status
+ Aborting with Error: goal node status timed out
+ GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+ GLOBAL_NETWORK_NAME test_net_expect_1617230521
+ Stopping network: test_net_expect_1617230521
+ Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521
+
+ stderr:
+ --- FAIL: TestGoalWithExpect/pingpongTest.exp (26.30s)
+ --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (207.24s)
+ --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (21.29s)
+ --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (18.58s)
+ --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (47.93s)
+ --- PASS: TestGoalWithExpect/corsTest.exp (9.63s)
+ --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (102.64s)
+ --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (201.07s)
+FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1538.381s
+ok github.com/algorand/go-algorand/test/e2e-go/cli/perf 0.131s [no tests to run]
+--- PASS: TestTealdbgWithExpect (3.29s)
+ --- PASS: TestTealdbgWithExpect/tealdbgSpinoffTest.exp (1.17s)
+ --- PASS: TestTealdbgWithExpect/tealdbgTest.exp (2.12s)
+ok github.com/algorand/go-algorand/test/e2e-go/cli/tealdbg/expect 3.352s
+--- PASS: TestDeadbeatBid (142.06s)
+--- PASS: TestStartAndEndAuctionTenUsersTenBidsEach (168.01s)
+--- PASS: TestDecayingPrice (108.82s)
+--- PASS: TestStartAndCancelAuctionEarlyOneUserTenBids (91.91s)
+--- PASS: TestStartAndCancelAuctionOneUserTenBids (135.28s)
+--- PASS: TestStartAndEndAuctionTenUsersOneBidEach (155.63s)
+ok github.com/algorand/go-algorand/test/e2e-go/features/auction 406.589s
+--- PASS: TestCompactCerts (224.23s)
+ok github.com/algorand/go-algorand/test/e2e-go/features/compactcert 224.312s
+--- PASS: TestZeroSigners (15.21s)
+--- PASS: TestDuplicateKeys (31.65s)
+--- PASS: TestZeroThreshold (14.61s)
+--- PASS: TestBasicMultisig (52.16s)
+ok github.com/algorand/go-algorand/test/e2e-go/features/multisig 67.439s
+--- PASS: TestParticipationKeyOnlyAccountParticipatesCorrectly (32.97s)
+--- PASS: TestRewardUnitThreshold (79.11s)
+--- PASS: TestOnlineOfflineRewards (97.68s)
+--- PASS: TestRewardRateRecalculation (105.57s)
+ok github.com/algorand/go-algorand/test/e2e-go/features/participation 184.782s
+--- PASS: TestLeaseTransactionsSameSender (21.84s)
+--- PASS: TestAssetInformation (29.59s)
+--- PASS: TestTxnMerkleProof (9.01s)
+--- PASS: TestAccountsCanChangeOnlineStateInTheFuture (23.47s)
+--- PASS: TestAccountsCanSendMoney (31.42s)
+--- PASS: TestAccountsCanChangeOnlineState (13.95s)
+--- PASS: TestLeaseTransactionsDifferentSender (18.40s)
+--- PASS: TestLeaseTransactionsSameSenderDifferentLease (9.77s)
+--- PASS: TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 (14.01s)
+--- PASS: TestAssetCreateWaitRestartDelete (35.88s)
+--- PASS: TestOverlappingLeases (92.91s)
+--- PASS: TestGroupTransactionsSubmission (7.79s)
+--- PASS: TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 (17.76s)
+--- PASS: TestGroupTransactionsDifferentSizes (34.62s)
+--- PASS: TestAccountsCanClose (34.81s)
+--- PASS: TestGroupTransactions (30.62s)
+--- PASS: TestAssetSend (34.51s)
+--- PASS: TestAssetValidRounds (10.39s)
+--- PASS: TestAssetGroupCreateSendDestroy (31.74s)
+--- PASS: TestAccountInformationV2 (28.95s)
+ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 270.274s
+--- PASS: TestGoodAuthSucceeds (0.21s)
+--- PASS: TestMasterKeyGeneratePastImportedKeys (0.30s)
+--- PASS: TestMultisigImportList (0.26s)
+--- PASS: TestSignProgram (0.25s)
+--- PASS: TestMasterKeyImportExport (0.32s)
+--- PASS: TestDeleteKey (0.25s)
+--- PASS: TestSignTransaction (0.26s)
+--- PASS: TestImportKey (0.26s)
+--- PASS: TestAbsSQLiteWalletConfigSucceeds (0.00s)
+--- PASS: TestExportKey (0.26s)
+--- PASS: TestNonAbsSQLiteWalletConfigFails (0.00s)
+--- PASS: TestGenerateAndListKeys (0.27s)
+--- PASS: TestBadAuthFails (0.21s)
+--- PASS: TestServerStartsStopsSuccessfully (0.21s)
+--- PASS: TestWalletCreation (0.23s)
+--- PASS: TestWalletSessionExpiry (2.23s)
+--- PASS: TestWalletSessionRenew (2.23s)
+--- PASS: TestWalletRename (0.24s)
+--- PASS: TestBlankWalletCreation (0.23s)
+--- PASS: TestMultisigSignWithSigner (0.25s)
+--- PASS: TestWalletSessionRelease (0.23s)
+--- PASS: TestMultisigSignWithWrongSigner (0.26s)
+--- PASS: TestMultisigSignProgram (0.26s)
+--- PASS: TestMultisigSign (0.25s)
+--- PASS: TestMultisigExportDelete (0.25s)
+ok github.com/algorand/go-algorand/test/e2e-go/kmd 5.048s
+ok github.com/algorand/go-algorand/test/e2e-go/perf 0.069s [no tests to run]
+--- PASS: TestClientCanGetStatus (0.01s)
+--- PASS: TestClientCanGetStatusAfterBlock (13.80s)
+--- PASS: TestTransactionsByAddr (17.97s)
+--- PASS: TestClientCanGetVersion (0.00s)
+--- PASS: TestClientCanGetSuggestedFee (0.02s)
+--- PASS: TestClientCanGetMinTxnFee (0.01s)
+--- PASS: TestClientCanGetBlockInfo (0.03s)
+--- PASS: TestClientRejectsBadFromAddressWhenSending (0.01s)
+--- PASS: TestClientRejectsBadToAddressWhenSending (0.02s)
+--- PASS: TestClientRejectsMutatedFromAddressWhenSending (0.02s)
+--- PASS: TestClientRejectsMutatedToAddressWhenSending (0.01s)
+--- PASS: TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey (0.01s)
+--- PASS: TestClientOversizedNote (0.02s)
+--- PASS: TestClientCanSendAndGetNote (7.32s)
+--- PASS: TestClientCanGetTransactionStatus (8.23s)
+--- PASS: TestAccountBalance (8.33s)
+--- PASS: TestAccountParticipationInfo (8.33s)
+--- PASS: TestSupply (0.00s)
+--- PASS: TestClientCanGetGoRoutines (0.01s)
+--- PASS: TestSendingTooMuchFails (0.07s)
+--- PASS: TestSendingFromEmptyAccountFails (0.04s)
+--- PASS: TestSendingTooLittleToEmptyAccountFails (0.06s)
+--- PASS: TestSendingLowFeeFails (0.07s)
+--- PASS: TestSendingNotClosingAccountFails (3.89s)
+--- PASS: TestClientCanGetPendingTransactions (8.20s)
+--- PASS: TestClientTruncatesPendingTransactions (9.60s)
+--- PASS: TestManyAccountsCanGoOnline (21.76s)
+ok github.com/algorand/go-algorand/test/e2e-go/stress/transactions 21.821s
+--- PASS: TestApplicationsUpgradeOverREST (69.75s)
+--- PASS: TestApplicationsUpgradeOverGossip (72.80s)
+--- PASS: TestRekeyUpgrade (53.44s)
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV15toV16 (63.93s)
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV22toV23 (80.36s)
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV21toV22 (62.73s)
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV24toV25 (80.65s)
+--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV23toV24 (63.01s)
+ok github.com/algorand/go-algorand/test/e2e-go/upgrades 385.737s
diff --git a/debug/logfilter/example7.in b/debug/logfilter/example7.in
new file mode 100644
index 000000000..0953dd539
--- /dev/null
+++ b/debug/logfilter/example7.in
@@ -0,0 +1,63 @@
+2021/04/13 21:46:13 Desc object: [{"name":"globals","value":{"type":"object","className":"Object","description":"Object","objectId":"globalsObjID","preview":{"type":"object","description":"Object","overflow":true,"properties":[{"name":"error","type":"undefined","value":"globals: invalid length 0 != 10"}]}},"writable":false,"configurable":false,"enumerable":true,"isOwn":true}]
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test2
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test2
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT session test closed
+--- FAIL: TestBalanceAdapterStateChanges (0.00s)
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 starting server on 127.0.0.1:63081
+2021/04/13 21:46:14 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:14 Run mode: logicsig
+2021/04/13 21:46:14 Open http://127.0.0.1:12345 in a web browser
+2021/04/13 21:46:14 subscribeHandler error: websocket: response does not implement http.Hijacker
+FAIL
+FAIL github.com/algorand/go-algorand/cmd/tealdbg 0.903s
+FAIL
diff --git a/debug/logfilter/example7.out.expected b/debug/logfilter/example7.out.expected
new file mode 100644
index 000000000..c7df1d1a6
--- /dev/null
+++ b/debug/logfilter/example7.out.expected
@@ -0,0 +1,64 @@
+--- FAIL: TestBalanceAdapterStateChanges (0.00s)
+2021/04/13 21:46:13 Desc object: [{"name":"globals","value":{"type":"object","className":"Object","description":"Object","objectId":"globalsObjID","preview":{"type":"object","description":"Object","overflow":true,"properties":[{"name":"error","type":"undefined","value":"globals: invalid length 0 != 10"}]}},"writable":false,"configurable":false,"enumerable":true,"isOwn":true}]
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test2
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test2
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test
+2021/04/13 21:46:13 Or open in Chrome:
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test
+2021/04/13 21:46:13 ------------------------------------------------
+2021/04/13 21:46:13 CDT session test closed
+
+FAIL github.com/algorand/go-algorand/cmd/tealdbg 0.903s...
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: logicsig
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:13 Run mode: stateful
+2021/04/13 21:46:13 starting server on 127.0.0.1:63081
+2021/04/13 21:46:14 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff
+2021/04/13 21:46:14 Run mode: logicsig
+2021/04/13 21:46:14 Open http://127.0.0.1:12345 in a web browser
+2021/04/13 21:46:14 subscribeHandler error: websocket: response does not implement http.Hijacker
+
+FAIL github.com/algorand/go-algorand/cmd/tealdbg 0.903s
diff --git a/debug/logfilter/example8.in b/debug/logfilter/example8.in
new file mode 100644
index 000000000..b538811e0
--- /dev/null
+++ b/debug/logfilter/example8.in
@@ -0,0 +1,47 @@
+=== RUN TestParticipationKeyOnlyAccountParticipatesCorrectly
+=== PAUSE TestParticipationKeyOnlyAccountParticipatesCorrectly
+=== RUN TestNewAccountCanGoOnlineAndParticipate
+ onlineOfflineParticipation_test.go:105:
+--- SKIP: TestNewAccountCanGoOnlineAndParticipate (0.00s)
+=== RUN TestOverlappingParticipationKeys
+=== PAUSE TestOverlappingParticipationKeys
+=== RUN TestOnlineOfflineRewards
+=== PAUSE TestOnlineOfflineRewards
+=== RUN TestPartkeyOnlyRewards
+ participationRewards_test.go:139:
+--- SKIP: TestPartkeyOnlyRewards (0.00s)
+=== RUN TestRewardUnitThreshold
+=== PAUSE TestRewardUnitThreshold
+=== RUN TestRewardRateRecalculation
+=== PAUSE TestRewardRateRecalculation
+=== CONT TestOverlappingParticipationKeys
+=== CONT TestRewardRateRecalculation
+--- FAIL: TestOverlappingParticipationKeys (0.00s)
+panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory [recovered]
+ panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory
+
+goroutine 119 [running]:
+testing.tRunner.func1.1(0x1515200, 0xc0002f0880)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:988 +0x452
+testing.tRunner.func1(0xc0002377a0)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:991 +0x600
+panic(0x1515200, 0xc0002f0880)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/runtime/panic.go:975 +0x3e3
+github.com/algorand/go-algorand/test/framework/fixtures.(*baseFixture).failOnError(...)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/baseFixture.go:69
+github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).failOnError(0xc0001d6800, 0x19ee400, 0xc0002db290, 0x16d71fc, 0x24)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:329 +0x1d8
+github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).setup(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0x16d25f5, 0x20, 0xc000104ea0, 0x28, 0xc000091000)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:97 +0x490
+github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).SetupNoStart(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0xc000104ea0, 0x28)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:73 +0x92
+github.com/algorand/go-algorand/test/framework/fixtures.(*RestClientFixture).SetupNoStart(...)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/restClientFixture.go:50
+github.com/algorand/go-algorand/test/e2e-go/features/participation.TestOverlappingParticipationKeys(0xc0002377a0)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/e2e-go/features/participation/overlappingParticipationKeys_test.go:58 +0x3a0
+testing.tRunner(0xc0002377a0, 0x17230a0)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1039 +0x1ec
+created by testing.(*T).Run
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1090 +0x701
+FAIL github.com/algorand/go-algorand/test/e2e-go/features/participation 0.069s
+
diff --git a/debug/logfilter/example8.out.expected b/debug/logfilter/example8.out.expected
new file mode 100644
index 000000000..68ea01a6b
--- /dev/null
+++ b/debug/logfilter/example8.out.expected
@@ -0,0 +1,30 @@
+
+--- FAIL: TestOverlappingParticipationKeys (0.00s)
+FAIL github.com/algorand/go-algorand/test/e2e-go/features/participation 0.069s...
+panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory [recovered]
+ panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory
+goroutine 119 [running]:
+testing.tRunner.func1.1(0x1515200, 0xc0002f0880)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:988 +0x452
+testing.tRunner.func1(0xc0002377a0)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:991 +0x600
+panic(0x1515200, 0xc0002f0880)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/runtime/panic.go:975 +0x3e3
+github.com/algorand/go-algorand/test/framework/fixtures.(*baseFixture).failOnError(...)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/baseFixture.go:69
+github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).failOnError(0xc0001d6800, 0x19ee400, 0xc0002db290, 0x16d71fc, 0x24)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:329 +0x1d8
+github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).setup(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0x16d25f5, 0x20, 0xc000104ea0, 0x28, 0xc000091000)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:97 +0x490
+github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).SetupNoStart(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0xc000104ea0, 0x28)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:73 +0x92
+github.com/algorand/go-algorand/test/framework/fixtures.(*RestClientFixture).SetupNoStart(...)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/restClientFixture.go:50
+github.com/algorand/go-algorand/test/e2e-go/features/participation.TestOverlappingParticipationKeys(0xc0002377a0)
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/e2e-go/features/participation/overlappingParticipationKeys_test.go:58 +0x3a0
+testing.tRunner(0xc0002377a0, 0x17230a0)
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1039 +0x1ec
+created by testing.(*T).Run
+ /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1090 +0x701
+
+FAIL github.com/algorand/go-algorand/test/e2e-go/features/participation 0.069s
diff --git a/debug/logfilter/main.go b/debug/logfilter/main.go
new file mode 100644
index 000000000..265c501fe
--- /dev/null
+++ b/debug/logfilter/main.go
@@ -0,0 +1,148 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// logfilter buffers go test output and limits what is emitted to only the error-related content.
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+type test struct {
+	name         string // test name as parsed from the "=== RUN" / "--- PASS/FAIL" lines
+	outputBuffer string // log lines attributed to this test; emitted only when the test fails
+}
+
+func logFilter(inFile io.Reader, outFile io.Writer) int {
+	scanner := bufio.NewScanner(inFile)
+
+	tests := make(map[string]test)
+	currentTestName := ""
+	incomingFails := false // set once any test or package failure is seen; drives the exit code
+	// packageOutputBuffer buffers messages that are package-oriented, e.g. TestMain() generated messages,
+	// which are emitted before any test starts to run.
+	packageOutputBuffer := ""
+	for scanner.Scan() {
+		line := scanner.Text()
+		if len(line) == 0 {
+			continue
+		}
+		if strings.HasPrefix(line, "=== RUN") { // a test is starting; begin tracking it
+			var testName string
+			fmt.Sscanf(line, "=== RUN %s", &testName)
+			currentTestName = testName
+			if _, have := tests[currentTestName]; !have {
+				tests[currentTestName] = test{name: currentTestName}
+			}
+			continue
+		}
+		if strings.HasPrefix(line, "=== CONT") { // a paused parallel test resumes; it must be known already
+			var testName string
+			fmt.Sscanf(line, "=== CONT %s", &testName)
+			currentTestName = testName
+			if _, have := tests[currentTestName]; !have {
+				panic(fmt.Errorf("test %s is missing", currentTestName))
+			}
+			continue
+		}
+		if strings.HasPrefix(line, "=== PAUSE") { // a parallel test pauses; no test is current afterwards
+			var testName string
+			fmt.Sscanf(line, "=== PAUSE %s", &testName)
+			currentTestName = ""
+			if _, have := tests[testName]; !have {
+				panic(fmt.Errorf("test %s is missing", testName))
+			}
+			continue
+		}
+		if idx := strings.Index(line, "--- PASS:"); idx >= 0 { // test passed: print the verdict, discard its buffered output
+			var testName string
+			fmt.Sscanf(line[idx:], "--- PASS: %s", &testName)
+			if _, have := tests[testName]; !have { // unknown test name: treat as package-level and flush the package buffer
+				fmt.Fprintf(outFile, "%s\r\n%s\r\n", line, packageOutputBuffer)
+				packageOutputBuffer = ""
+			} else {
+				fmt.Fprintf(outFile, line+"\r\n")
+				delete(tests, testName)
+				currentTestName = ""
+			}
+			continue
+		}
+		if idx := strings.Index(line, "--- FAIL:"); idx >= 0 { // test failed: replay its buffered output, then the verdict
+			incomingFails = true
+			var testName string
+			fmt.Sscanf(line[idx:], "--- FAIL: %s", &testName)
+			test, have := tests[testName]
+			if !have { // unknown test name: treat as package-level and flush the package buffer
+				fmt.Fprintf(outFile, "%s\r\n%s\r\n", line, packageOutputBuffer)
+				packageOutputBuffer = ""
+			} else {
+				fmt.Fprintf(outFile, test.outputBuffer+"\r\n")
+				fmt.Fprintf(outFile, line+"\r\n")
+				test.outputBuffer = ""
+				tests[testName] = test
+				currentTestName = ""
+			}
+			continue
+		}
+		// otherwise, attribute the line to the currently-running test's output buffer (if one is active)
+		currentTest, have := tests[currentTestName]
+		if have {
+			currentTest.outputBuffer += "\r\n" + line
+			tests[currentTestName] = currentTest
+			continue
+		}
+		// having no current test is only legitimate for bare PASS/FAIL verdicts or package summary lines.
+		if line == "PASS" || line == "FAIL" {
+			continue
+		}
+		if strings.HasPrefix(line, "ok ") { // package summary for a passing package: print it, drop buffered package output
+			fmt.Fprintf(outFile, line+"\r\n")
+			packageOutputBuffer = ""
+			continue
+		}
+		if strings.HasPrefix(line, "FAIL ") { // package summary for a failing package: replay buffered package output first
+			incomingFails = true
+			if len(packageOutputBuffer) > 0 {
+				fmt.Fprintf(outFile, line+"...\r\n%s\r\n", packageOutputBuffer)
+			}
+			packageOutputBuffer = ""
+			fmt.Fprintf(outFile, line+"\r\n")
+			continue
+		}
+		// anything else is package-oriented output; buffer it until the package verdict arrives
+		packageOutputBuffer += line + "\r\n"
+	}
+	scannerErr := scanner.Err()
+	if scannerErr != nil { // input stream broke mid-run: flush whatever the current test buffered, then report
+		if currentTestName != "" && tests[currentTestName].outputBuffer != "" {
+			fmt.Fprint(outFile, tests[currentTestName].outputBuffer)
+		}
+		fmt.Fprintf(outFile, "logfilter: the following error received on the input stream : %v\r\n", scannerErr)
+	}
+	if incomingFails {
+		return 1
+	}
+	return 0
+}
+
+func main() {
+	retCode := logFilter(os.Stdin, os.Stdout) // filter go-test output from stdin to stdout
+	os.Exit(retCode)                          // exit nonzero when any failure was observed
+}
diff --git a/debug/logfilter/main_test.go b/debug/logfilter/main_test.go
new file mode 100644
index 000000000..2697a88c0
--- /dev/null
+++ b/debug/logfilter/main_test.go
@@ -0,0 +1,63 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// logfilter buffers go test output and limits what is emitted to only the error-related content.
+package main
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestLogFilterExamples(t *testing.T) {
+	// iterate over all the example files in the local directory.
+	exampleFiles := []string{}
+	filepath.Walk(".", func(path string, info os.FileInfo, err error) error { // NOTE(review): Walk's returned error is discarded; a walk failure silently yields an empty example list
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		}
+		if strings.Contains(info.Name(), "example") && strings.HasSuffix(info.Name(), ".in") { // collect example*.in fixtures
+			exampleFiles = append(exampleFiles, path)
+		}
+		return nil
+	})
+	for _, exampleFileName := range exampleFiles {
+		// load the expected result file.
+		expectedOutFile := strings.Replace(exampleFileName, ".in", ".out.expected", 1)
+		expectedOutBytes, err := ioutil.ReadFile(expectedOutFile)
+		require.NoError(t, err)
+		expectedErrorCode := 0
+		if strings.Contains(string(expectedOutBytes), "FAIL") { // expected exit code is inferred from the expected output
+			expectedErrorCode = 1
+		}
+
+		inFile, err := os.Open(exampleFileName)
+		require.NoError(t, err)
+		writingBuffer := bytes.NewBuffer(nil)
+		errCode := logFilter(inFile, writingBuffer) // run the filter and compare exit code plus full output
+		require.Equal(t, expectedErrorCode, errCode)
+		require.Equal(t, string(expectedOutBytes), writingBuffer.String())
+	}
+}
diff --git a/docker/build/Dockerfile b/docker/build/Dockerfile
index 9a1bf07a2..6228677f7 100644
--- a/docker/build/Dockerfile
+++ b/docker/build/Dockerfile
@@ -6,7 +6,7 @@ WORKDIR /root
RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local
ENV GOROOT=/usr/local/go \
GOPATH=$HOME/go \
- GOPROXY=https://gocenter.io,https://goproxy.io,direct
+ GOPROXY=https://pkg.go.dev,https://goproxy.io,direct
RUN mkdir -p $GOPATH/src/github.com/algorand
WORKDIR $GOPATH/src/github.com/algorand
COPY ./go-algorand ./go-algorand/
diff --git a/docker/build/Dockerfile-deploy b/docker/build/Dockerfile-deploy
index 63eb7cb5d..6f1ed8c85 100644
--- a/docker/build/Dockerfile-deploy
+++ b/docker/build/Dockerfile-deploy
@@ -6,7 +6,7 @@ WORKDIR /root
RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local
ENV GOROOT=/usr/local/go \
GOPATH=$HOME/go \
- GOPROXY=https://gocenter.io,https://goproxy.io,direct
+ GOPROXY=https://pkg.go.dev,https://goproxy.io,direct
RUN mkdir -p $GOPATH/src/github.com/algorand
WORKDIR $GOPATH/src/github.com/algorand
COPY . ./go-algorand/
diff --git a/docker/build/cicd.alpine.Dockerfile b/docker/build/cicd.alpine.Dockerfile
index 228a71ee8..e449d0b91 100644
--- a/docker/build/cicd.alpine.Dockerfile
+++ b/docker/build/cicd.alpine.Dockerfile
@@ -25,7 +25,7 @@ RUN apk add dpkg && \
COPY . $GOPATH/src/github.com/algorand/go-algorand
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
ENV GCC_CONFIG="--with-arch=armv6" \
- GOPROXY=https://gocenter.io,https://goproxy.io,direct
+ GOPROXY=https://pkg.go.dev,https://goproxy.io,direct
RUN make ci-deps && make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
mkdir -p $GOPATH/src/github.com/algorand/go-algorand
diff --git a/docker/build/cicd.centos.Dockerfile b/docker/build/cicd.centos.Dockerfile
index b36ceca93..07c1e6fcb 100644
--- a/docker/build/cicd.centos.Dockerfile
+++ b/docker/build/cicd.centos.Dockerfile
@@ -18,7 +18,7 @@ ENV GOROOT=/usr/local/go \
RUN mkdir -p $GOPATH/src/github.com/algorand
COPY . $GOPATH/src/github.com/algorand/go-algorand
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
- GOPROXY=https://gocenter.io
+ GOPROXY=https://pkg.go.dev
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
RUN make ci-deps && make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
diff --git a/docker/build/cicd.ubuntu.Dockerfile b/docker/build/cicd.ubuntu.Dockerfile
index f30ec85cc..c12f3da60 100644
--- a/docker/build/cicd.ubuntu.Dockerfile
+++ b/docker/build/cicd.ubuntu.Dockerfile
@@ -15,7 +15,7 @@ ENV GOROOT=/usr/local/go \
RUN mkdir -p $GOPATH/src/github.com/algorand
COPY . $GOPATH/src/github.com/algorand/go-algorand
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
- GOPROXY=https://gocenter.io
+ GOPROXY=https://pkg.go.dev
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
RUN make ci-deps && make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
diff --git a/gen/generate.go b/gen/generate.go
index fd06634d0..4c14f61f9 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -18,7 +18,9 @@ package gen
import (
"fmt"
+ "io"
"io/ioutil"
+ "math"
"os"
"path/filepath"
"runtime"
@@ -55,15 +57,44 @@ type genesisAllocation struct {
Online basics.Status
}
-// GenerateGenesisFiles generates the genesis.json file and wallet files for a give genesis configuration.
-func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusProtocols, outDir string, verbose bool) error {
- err := os.Mkdir(outDir, os.ModeDir|os.FileMode(0777))
- if err != nil && os.IsNotExist(err) {
- return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error())
+func u64absDiff(a, b uint64) uint64 {
+ if a > b {
+ return a - b
+ }
+ if b > a {
+ return b - a
+ }
+ return 0
+}
+
+// testable inner function that doesn't touch filesystem
+func setupGenerateGenesisFiles(genesisData *GenesisData, consensus config.ConsensusProtocols, verboseOut io.Writer) (proto protocol.ConsensusVersion, consensusParams config.ConsensusParams, allocation []genesisAllocation, err error) {
+ err = nil
+ // Backwards compatibility with older genesis files: if the consensus
+ // protocol version is not specified, default to V0.
+ proto = genesisData.ConsensusProtocol
+ if proto == protocol.ConsensusVersion("") {
+ proto = protocol.ConsensusCurrentVersion
+ }
+
+ // Backwards compatibility with older genesis files: if the fee sink
+ // or the rewards pool is not specified, set their defaults.
+ if (genesisData.FeeSink == basics.Address{}) {
+ genesisData.FeeSink = defaultSinkAddr
+ }
+ if (genesisData.RewardsPool == basics.Address{}) {
+ genesisData.RewardsPool = defaultPoolAddr
+ }
+
+ var ok bool
+ consensusParams, ok = consensus[proto]
+ if !ok {
+ err = fmt.Errorf("protocol %s not supported", proto)
+ return
}
var sum uint64
- allocation := make([]genesisAllocation, len(genesisData.Wallets))
+ allocation = make([]genesisAllocation, len(genesisData.Wallets))
for i, wallet := range genesisData.Wallets {
acct := genesisAllocation{
@@ -79,35 +110,50 @@ func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusPro
}
if sum != TotalMoney {
- panic(fmt.Sprintf("Amounts don't add up to TotalMoney - off by %v", int64(TotalMoney)-int64(sum)))
- }
-
- // Backwards compatibility with older genesis files: if the consensus
- // protocol version is not specified, default to V0.
- proto := genesisData.ConsensusProtocol
- if proto == protocol.ConsensusVersion("") {
- proto = protocol.ConsensusCurrentVersion
+ fsum := float64(sum)
+ ftot := float64(TotalMoney)
+ if (math.Abs((fsum-ftot)/ftot) < 0.01) && (u64absDiff(sum, TotalMoney) < 10000) {
+ if verboseOut != nil {
+ fmt.Fprintf(verboseOut, "doing roundoff fixup expected total money %d actual sum %d\n", TotalMoney, sum)
+ }
+ // wallet stake is a float and roundoff might happen but we might be close enough to do fixup
+ i := 0
+ for sum != TotalMoney {
+ if sum < TotalMoney {
+ allocation[i].Stake++
+ sum++
+ } else {
+ if allocation[i].Stake > consensusParams.MinBalance {
+ allocation[i].Stake--
+ sum--
+ }
+ }
+ i = (i + 1) % len(allocation)
+ }
+ } else {
+ panic(fmt.Sprintf("Amounts don't add up to TotalMoney - off by %v", int64(TotalMoney)-int64(sum)))
+ }
}
+ return
+}
- // Backwards compatibility with older genesis files: if the fee sink
- // or the rewards pool is not specified, set their defaults.
- if (genesisData.FeeSink == basics.Address{}) {
- genesisData.FeeSink = defaultSinkAddr
- }
- if (genesisData.RewardsPool == basics.Address{}) {
- genesisData.RewardsPool = defaultPoolAddr
+// GenerateGenesisFiles generates the genesis.json file and wallet files for a given genesis configuration.
+func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusProtocols, outDir string, verboseOut io.Writer) error {
+ proto, consensusParams, allocation, err := setupGenerateGenesisFiles(&genesisData, consensus, verboseOut)
+ if err != nil {
+ return err
}
- consensusParams, ok := consensus[proto]
- if !ok {
- return fmt.Errorf("protocol %s not supported", proto)
+ err = os.Mkdir(outDir, os.ModeDir|os.FileMode(0777))
+ if err != nil && os.IsNotExist(err) {
+ return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error())
}
- return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, verbose)
+ return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, verboseOut)
}
func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, netName string, schemaVersionModifier string,
- allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, verbose bool) (err error) {
+ allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, verboseOut io.Writer) (err error) {
genesisAddrs := make(map[string]basics.Address)
records := make(map[string]basics.AccountData)
@@ -127,6 +173,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
concurrentWalletGenerators := runtime.NumCPU() * 2
errorsChannel := make(chan error, concurrentWalletGenerators)
+ verbose := verboseOut != nil
verbosedOutput := make(chan string)
var creatingWalletsWaitGroup sync.WaitGroup
var writeMu deadlock.Mutex
@@ -142,7 +189,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
return
}
var root account.Root
- var part account.Participation
+ var part account.PersistedParticipation
wfilename := filepath.Join(outDir, config.RootKeyFilename(wallet.Name))
pfilename := filepath.Join(outDir, config.PartKeyFilename(wallet.Name, firstWalletValid, lastWalletValid))
@@ -243,7 +290,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
// create a listener for the verbosedOutput
go func() {
for textOut := range verbosedOutput {
- fmt.Printf("%s\n", textOut)
+ fmt.Fprintf(verboseOut, "%s\n", textOut)
}
}()
}
@@ -269,7 +316,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
genesisAddrs["RewardsPool"] = rewardsPool
if verbose {
- fmt.Println(protoVersion, protoParams.MinBalance)
+ fmt.Fprintln(verboseOut, protoVersion, protoParams.MinBalance)
}
records["FeeSink"] = basics.AccountData{
@@ -350,7 +397,7 @@ func loadRootKey(filename string) (root account.Root, rootDB db.Accessor, err er
}
// If err != nil, partDB needs to be closed.
-func loadPartKeys(filename string) (part account.Participation, partDB db.Accessor, err error) {
+func loadPartKeys(filename string) (part account.PersistedParticipation, partDB db.Accessor, err error) {
if !util.FileExists(filename) {
err = os.ErrNotExist
return
diff --git a/gen/generate_test.go b/gen/generate_test.go
index b09e9aa5d..c8a6656d8 100644
--- a/gen/generate_test.go
+++ b/gen/generate_test.go
@@ -21,10 +21,13 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"sync"
"testing"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
"github.com/stretchr/testify/require"
@@ -100,3 +103,18 @@ func TestLoadSingleRootKeyConcurrent(t *testing.T) {
}
wg.Wait()
}
+
+// TestGenesisRoundoff splits the total stake evenly across 15 wallets ( 100/15 does not
+// divide evenly in floating point ), and verifies that setupGenerateGenesisFiles succeeds
+// while reporting the roundoff fixup through the verbose output writer.
+func TestGenesisRoundoff(t *testing.T) {
+ verbosity := strings.Builder{}
+ genesisData := DefaultGenesis
+ genesisData.NetworkName = "wat"
+ genesisData.ConsensusProtocol = protocol.ConsensusCurrentVersion // TODO: also check ConsensusFuture ?
+ genesisData.Wallets = make([]WalletData, 15)
+ for i := range genesisData.Wallets {
+ genesisData.Wallets[i].Name = fmt.Sprintf("w%d", i)
+ genesisData.Wallets[i].Stake = 100.0 / float64(len(genesisData.Wallets))
+ }
+ _, _, _, err := setupGenerateGenesisFiles(&genesisData, config.Consensus, &verbosity)
+ require.NoError(t, err)
+ require.True(t, strings.Contains(verbosity.String(), "roundoff"))
+}
diff --git a/installer/config.json.example b/installer/config.json.example
index 0c216be64..42b6361bd 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,15 +1,18 @@
{
- "Version": 15,
+ "Version": 16,
+ "AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AnnounceParticipationKey": true,
"Archival": false,
"BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
"BroadcastConnectionsLimit": -1,
"CadaverSizeTarget": 1073741824,
"CatchpointFileHistoryLength": 365,
"CatchpointInterval": 10000,
"CatchpointTracking": 0,
"CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
"CatchupFailurePeerRefreshRate": 10,
"CatchupGossipBlockFetchTimeoutSec": 4,
"CatchupHTTPBlockFetchTimeoutSec": 4,
@@ -20,11 +23,15 @@
"DNSBootstrapID": "<network>.algorand.network",
"DNSSecurityFlags": 1,
"DeadlockDetection": 0,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
"DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
"EnableAgreementReporting": false,
"EnableAgreementTimeMetrics": false,
"EnableAssembleStats": false,
"EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
"EnableCatchupFromArchiveServers": false,
"EnableDeveloperAPI": false,
"EnableGossipBlockService": true,
@@ -60,6 +67,7 @@
"OptimizeAccountsDatabaseOnStartup": false,
"OutgoingMessageFilterBucketCount": 3,
"OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
"PeerConnectionsUpdateInterval": 3600,
"PeerPingPeriodSeconds": 0,
"PriorityPeers": {},
diff --git a/ledger/README.md b/ledger/README.md
index 06d3010f3..999950ee5 100644
--- a/ledger/README.md
+++ b/ledger/README.md
@@ -26,7 +26,8 @@ The ledger exposes the following functions for managing the blocks:
- `Latest()` returns the last block added to the ledger.
-- `LatestCommitted()` returns the last block written to durable storage.
+- `LatestCommitted()` returns the last block written to durable storage
+ as well as the round of the latest block added to the ledger.
- `Block(round)` returns the block for `round`, or `ErrNoEntry` if no
such block has been added. Similarly, `BlockCert(round)` will return
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 2019305d0..d9e96dd44 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -1180,7 +1180,7 @@ func accountsNewRound(tx *sql.Tx, updates compactAccountDeltas, creatables map[b
}
// totalsNewRounds updates the accountsTotals by applying series of round changes
-func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpdates compactAccountDeltas, accountTotals []ledgercore.AccountTotals, protos []config.ConsensusParams) (err error) {
+func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpdates compactAccountDeltas, accountTotals []ledgercore.AccountTotals, proto config.ConsensusParams) (err error) {
var ot basics.OverflowTracker
totals, err := accountsTotals(tx, false)
if err != nil {
@@ -1201,13 +1201,13 @@ func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpda
addr, data := updates[i].GetByIdx(j)
if oldAccountData, has := accounts[addr]; has {
- totals.DelAccount(protos[i], oldAccountData, &ot)
+ totals.DelAccount(proto, oldAccountData, &ot)
} else {
err = fmt.Errorf("missing old account data")
return
}
- totals.AddAccount(protos[i], data, &ot)
+ totals.AddAccount(proto, data, &ot)
accounts[addr] = data
}
}
@@ -1357,15 +1357,19 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
return
}
-type merkleCommitter struct {
+// MerkleCommitter stores and loads merkle trie pages to/from a sqlite database table.
+// It implements the merkletrie.Committer interface ( see StorePage / LoadPage ).
+//msgp:ignore MerkleCommitter
+type MerkleCommitter struct {
tx *sql.Tx
deleteStmt *sql.Stmt
insertStmt *sql.Stmt
selectStmt *sql.Stmt
}
-func makeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err error) {
- mc = &merkleCommitter{tx: tx}
+// MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading
+// merkletrie pages from a sqlite database.
+func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *MerkleCommitter, err error) {
+ mc = &MerkleCommitter{tx: tx}
accountHashesTable := "accounthashes"
if staging {
accountHashesTable = "catchpointaccounthashes"
@@ -1385,8 +1389,8 @@ func makeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err err
return mc, nil
}
-// StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqllite database table.
-func (mc *merkleCommitter) StorePage(page uint64, content []byte) error {
+// StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqlite database table.
+func (mc *MerkleCommitter) StorePage(page uint64, content []byte) error {
if len(content) == 0 {
_, err := mc.deleteStmt.Exec(page)
return err
@@ -1395,8 +1399,8 @@ func (mc *merkleCommitter) StorePage(page uint64, content []byte) error {
return err
}
-// LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqllite database table.
-func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) {
+// LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqlite database table.
+func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) {
err = mc.selectStmt.QueryRow(page).Scan(&content)
if err == sql.ErrNoRows {
content = nil
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 0907fa897..9cbb3060f 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -552,7 +552,7 @@ func TestAccountDBRound(t *testing.T) {
updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, baseAccounts)
err = updatesCnt.accountsLoadOld(tx)
require.NoError(t, err)
- err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, []config.ConsensusParams{proto})
+ err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, proto)
require.NoError(t, err)
_, err = accountsNewRound(tx, updatesCnt, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
@@ -1052,7 +1052,7 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder bool) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion)
+ genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
const inMem = false
log := logging.TestingLog(b)
cfg := config.GetDefaultLocal()
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index d68a7a567..5dc94b966 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -79,7 +79,28 @@ const baseAccountsPendingAccountsBufferSize = 100000
// is being flushed into the main base account cache.
const baseAccountsPendingAccountsWarnThreshold = 85000
-var trieMemoryConfig = merkletrie.MemoryConfig{
+// initializeCachesReadaheadBlocksStream defines how many blocks we're going to attempt to queue for the
+// initializeCaches method before it can process and store the account changes to disk.
+const initializeCachesReadaheadBlocksStream = 4
+
+// initializeCachesRoundFlushInterval defines the number of rounds between every two consecutive
+// attempts to flush the memory account data to disk. Setting this value too high would increase
+// memory utilization. Setting this too low would increase disk i/o.
+const initializeCachesRoundFlushInterval = 1000
+
+// initializingAccountCachesMessageTimeout controls the amount of time that passes before we
+// log the "initializing account data caches" message to the log file. This is primarily for
+// nodes with slower disk access, where feedback that the node is functioning correctly is needed.
+const initializingAccountCachesMessageTimeout = 3 * time.Second
+
+// accountsUpdatePerRoundHighWatermark is the warning watermark for updating accounts data that takes
+// longer than expected. We set it up here for one second per round, so that if we're bulk updating
+// four rounds, we would allow up to 4 seconds. This becomes important when supporting balances recovery
+// where we end up batching up to 1000 rounds in a single update.
+const accountsUpdatePerRoundHighWatermark = 1 * time.Second
+
+// TrieMemoryConfig is the memory configuration setup used for the merkle trie.
+var TrieMemoryConfig = merkletrie.MemoryConfig{
NodesCountPerPage: merkleCommitterNodesPerPage,
CachedNodesCount: trieCachedNodesCount,
PageFillFactor: 0.95,
@@ -107,7 +128,7 @@ type accountUpdates struct {
// initAccounts specifies initial account values for database.
initAccounts map[basics.Address]basics.AccountData
- // initProto specifies the initial consensus parameters.
+ // initProto specifies the initial consensus parameters at the genesis block.
initProto config.ConsensusParams
// dbDirectory is the directory where the ledger and block sql file resides as well as the parent directroy for the catchup files to be generated
@@ -152,9 +173,9 @@ type accountUpdates struct {
// appears in creatableDeltas
creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable
- // protos stores consensus parameters dbRound and every
- // round after it; i.e., protos is one longer than deltas.
- protos []config.ConsensusParams
+ // versions stores consensus version dbRound and every
+ // round after it; i.e., versions is one longer than deltas.
+ versions []protocol.ConsensusVersion
// totals stores the totals for dbRound and every round after it;
// i.e., totals is one longer than deltas.
@@ -222,6 +243,21 @@ type accountUpdates struct {
// baseAccounts stores the most recently used accounts, at exactly dbRound
baseAccounts lruAccounts
+
+ // the synchronous mode that would be used for the account database.
+ synchronousMode db.SynchronousMode
+
+ // the synchronous mode that would be used while the accounts database is being rebuilt.
+ accountsRebuildSynchronousMode db.SynchronousMode
+
+ // logAccountUpdatesMetrics is a flag for enable/disable metrics logging
+ logAccountUpdatesMetrics bool
+
+ // logAccountUpdatesInterval sets a time interval for metrics logging
+ logAccountUpdatesInterval time.Duration
+
+ // lastMetricsLogTime is the time when the previous metrics logging occurred
+ lastMetricsLogTime time.Time
}
type deferredCommit struct {
@@ -295,6 +331,13 @@ func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, gene
au.commitSyncerClosed = make(chan struct{})
close(au.commitSyncerClosed)
au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker())
+ au.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode)
+ au.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode)
+
+ // log metrics
+ au.logAccountUpdatesMetrics = cfg.EnableAccountUpdatesStats
+ au.logAccountUpdatesInterval = cfg.AccountUpdatesStatsInterval
+
}
// loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional
@@ -623,11 +666,10 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b
au.committedOffset <- dc
}
}()
-
retRound = basics.Round(0)
var pendingDeltas int
- lookback := basics.Round(au.protos[len(au.protos)-1].MaxBalLookback)
+ lookback := basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback)
if committedRound < lookback {
return
}
@@ -684,6 +726,8 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b
offset = uint64(newBase - au.dbRound)
+ offset = au.consecutiveVersion(offset)
+
// check to see if this is a catchpoint round
isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval))
@@ -718,6 +762,22 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b
return
}
+// consecutiveVersion truncates the given offset so that the rounds covered by
+// au.versions[1..offset] all share a single consensus version, ensuring that a
+// commit chunk never spans across a protocol upgrade boundary.
+func (au *accountUpdates) consecutiveVersion(offset uint64) uint64 {
+ // check if this update chunk spans across multiple consensus versions. If so, break it so that each update would tackle only a single
+ // consensus version.
+ if au.versions[1] != au.versions[offset] {
+ // find the tip point.
+ tipPoint := sort.Search(int(offset), func(i int) bool {
+ // we're going to search here for version inequality, with the assumption that consensus versions won't repeat.
+ // that allows us to support [ver1, ver1, ..., ver2, ver2, ..., ver3, ver3] but not [ver1, ver1, ..., ver2, ver2, ..., ver1, ver3].
+ return au.versions[1] != au.versions[1+i]
+ })
+ // no need to handle the "not found" case, or tipPoint==int(offset), since we already know that it's there.
+ offset = uint64(tipPoint)
+ }
+ return offset
+}
+
// newBlock is the accountUpdates implementation of the ledgerTracker interface. This is the "external" facing function
// which invokes the internal implementation after taking the lock.
func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
@@ -881,7 +941,9 @@ func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals ledgercore.Accoun
return
}
-// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks
+// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
+// the method also support balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound
+// is far greater than 320; in these cases, it would flush to disk periodically in order to avoid high memory consumption.
func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, writingCatchpointRound basics.Round) (catchpointBlockDigest crypto.Digest, err error) {
var blk bookkeeping.Block
var delta ledgercore.StateDelta
@@ -896,28 +958,147 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound,
}
}
- for lastBalancesRound < lastestBlockRound {
- next := lastBalancesRound + 1
+ skipAccountCacheMessage := make(chan struct{})
+ writeAccountCacheMessageCompleted := make(chan struct{})
+ defer func() {
+ close(skipAccountCacheMessage)
+ select {
+ case <-writeAccountCacheMessageCompleted:
+ if err == nil {
+ au.log.Infof("initializeCaches completed initializing account data caches")
+ }
+ default:
+ }
+ }()
- blk, err = au.ledger.Block(next)
- if err != nil {
- return
+ // this goroutine logs a message once if the parent function have not completed in initializingAccountCachesMessageTimeout seconds.
+ // the message is important, since we're blocking on the ledger block database here, and we want to make sure that we log a message
+ // within the above timeout.
+ go func() {
+ select {
+ case <-time.After(initializingAccountCachesMessageTimeout):
+ au.log.Infof("initializeCaches is initializing account data caches")
+ close(writeAccountCacheMessageCompleted)
+ case <-skipAccountCacheMessage:
+ }
+ }()
+
+ blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream)
+ blockEvalFailed := make(chan struct{}, 1)
+ var blockRetrievalError error
+ go func() {
+ defer close(blocksStream)
+ for roundNumber := lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ {
+ blk, blockRetrievalError = au.ledger.Block(roundNumber)
+ if blockRetrievalError != nil {
+ return
+ }
+ select {
+ case blocksStream <- blk:
+ case <-blockEvalFailed:
+ return
+ }
}
+ }()
+
+ lastFlushedRound := lastBalancesRound
+ const accountsCacheLoadingMessageInterval = 5 * time.Second
+ lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2)
+ // rollbackSynchronousMode ensures that we switch to "fast writing mode" when we start flushing out rounds to disk, and that
+ // we exit this mode when we're done.
+ rollbackSynchronousMode := false
+ defer func() {
+ if rollbackSynchronousMode {
+ // restore default synchronous mode
+ au.dbs.Wdb.SetSynchronousMode(context.Background(), au.synchronousMode, au.synchronousMode >= db.SynchronousModeFull)
+ }
+ }()
+
+ for blk := range blocksStream {
delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval)
if err != nil {
+ close(blockEvalFailed)
return
}
au.newBlockImpl(blk, delta)
- lastBalancesRound = next
- if next == basics.Round(writingCatchpointRound) {
+ if blk.Round() == basics.Round(writingCatchpointRound) {
catchpointBlockDigest = blk.Digest()
}
+ // flush to disk if any of the following applies:
+ // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk
+ // 2. if we completed the loading and we loaded up more than 320 rounds.
+ flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval
+ loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound)
+ if flushIntervalExceed || loadCompleted {
+ // adjust the last flush time, so that we would not hold off the flushing due to "working too fast"
+ au.lastFlushTime = time.Now().Add(-balancesFlushInterval)
+
+ if !rollbackSynchronousMode {
+ // switch to rebuild synchronous mode to improve performance
+ au.dbs.Wdb.SetSynchronousMode(context.Background(), au.accountsRebuildSynchronousMode, au.accountsRebuildSynchronousMode >= db.SynchronousModeFull)
+
+ // flip the switch to rollback the synchronous mode once we're done.
+ rollbackSynchronousMode = true
+ }
+
+ // The unlocking/relocking here isn't very elegant, but it does get the work done :
+ // this method is called on either startup or when fast catchup is complete. In the former usecase, the
+ // locking here is not really needed since the system is only starting up, and there are no other
+ // consumers for the accounts update. On the latter usecase, the function would always have exactly 320 rounds,
+ // and therefore this wouldn't be an issue.
+ // However, to make sure we're not missing any other future codepath, unlocking here and re-locking later on is a pretty
+ // safe bet.
+ au.accountsMu.Unlock()
+
+ // flush the account data
+ au.committedUpTo(blk.Round())
+
+ // wait for the writing to complete.
+ au.waitAccountsWriting()
+
+ // The au.dbRound after writing should be ~320 behind the block round.
+ roundsBehind := blk.Round() - au.dbRound
+
+ au.accountsMu.Lock()
+
+ // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
+ if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) {
+ // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any futher changes
+ // would just accumulate in memory.
+ close(blockEvalFailed)
+ au.log.Errorf("initializeCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", au.dbRound, blk.Round())
+ err = fmt.Errorf("initializeCaches failed to initialize the account data caches")
+ return
+ }
+
+ // and once we flushed it to disk, update the lastFlushedRound
+ lastFlushedRound = blk.Round()
+ }
+
+ // if enough time have passed since the last time we wrote a message to the log file then give the user an update about the progess.
+ if time.Now().Sub(lastProgressMessage) > accountsCacheLoadingMessageInterval {
+ // drop the initial message if we're got to this point since a message saying "still initializing" that comes after "is initializing" doesn't seems to be right.
+ select {
+ case skipAccountCacheMessage <- struct{}{}:
+ // if we got to this point, we should be able to close the writeAccountCacheMessageCompleted channel to have the "completed initializing" message written.
+ close(writeAccountCacheMessageCompleted)
+ default:
+ }
+ au.log.Infof("initializeCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
+ lastProgressMessage = time.Now()
+ }
+
+ // prepare for the next iteration.
accLedgerEval.prevHeader = *delta.Hdr
}
+
+ if blockRetrievalError != nil {
+ err = blockRetrievalError
+ }
return
}
@@ -985,7 +1166,8 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo
if err != nil {
return
}
- au.protos = []config.ConsensusParams{config.Consensus[hdr.CurrentProtocol]}
+
+ au.versions = []protocol.ConsensusVersion{hdr.CurrentProtocol}
au.deltas = nil
au.creatableDeltas = nil
au.accounts = make(map[basics.Address]modifiedAccount)
@@ -1102,12 +1284,12 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b
}
// create the merkle trie for the balances
- committer, err := makeMerkleCommitter(tx, false)
+ committer, err := MakeMerkleCommitter(tx, false)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
}
- trie, err := merkletrie.MakeTrie(committer, trieMemoryConfig)
+ trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
}
@@ -1341,14 +1523,14 @@ func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx
}
if queryAddresses && len(addresses) > 0 {
- mc, err := makeMerkleCommitter(tx, false)
+ mc, err := MakeMerkleCommitter(tx, false)
if err != nil {
// at this point record deleted and DB is pruned for account data
- // if hash deletion fails just log it and do not about startup
+ // if hash deletion fails just log it and do not abort startup
au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err)
goto done
}
- trie, err := merkletrie.MakeTrie(mc, trieMemoryConfig)
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
if err != nil {
au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err)
goto done
@@ -1434,7 +1616,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDe
deleteHash := accountHashBuilder(addr, delta.old.accountData, protocol.Encode(&delta.old.accountData))
deleted, err = au.balancesTrie.Delete(deleteHash)
if err != nil {
- return err
+ return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
}
if !deleted {
au.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
@@ -1447,7 +1629,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDe
addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
added, err = au.balancesTrie.Add(addHash)
if err != nil {
- return err
+ return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
}
if !added {
au.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
@@ -1468,6 +1650,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDe
if accumulatedChanges > 0 {
_, err = au.balancesTrie.Commit()
}
+
return
}
@@ -1486,7 +1669,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
}
au.deltas = append(au.deltas, delta.Accts)
- au.protos = append(au.protos, proto)
+ au.versions = append(au.versions, blk.CurrentProtocol)
au.creatableDeltas = append(au.creatableDeltas, delta.Creatables)
au.roundDigest = append(au.roundDigest, blk.Digest())
au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1])
@@ -1576,7 +1759,7 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
return
}
- rewardsProto = au.protos[offset]
+ rewardsProto = config.Consensus[au.versions[offset]]
rewardsLevel = au.roundTotals[offset].RewardsLevel
// we're testing the withRewards here and setting the defer function only once, and only if withRewards is true.
@@ -1865,6 +2048,17 @@ func (au *accountUpdates) commitSyncer(deferedCommits chan deferredCommit) {
// commitRound write to the database a "chunk" of rounds, and update the dbRound accordingly.
func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookback basics.Round) {
+ var stats telemetryspec.AccountsUpdateMetrics
+ var updateStats bool
+
+ if au.logAccountUpdatesMetrics {
+ now := time.Now()
+ if now.Sub(au.lastMetricsLogTime) >= au.logAccountUpdatesInterval {
+ updateStats = true
+ au.lastMetricsLogTime = now
+ }
+ }
+
defer au.accountsWriting.Done()
au.accountsMu.RLock()
@@ -1885,6 +2079,15 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
// adjust the offset according to what happened meanwhile..
offset -= uint64(au.dbRound - dbRound)
+
+ // if this iteration need to flush out zero rounds, just return right away.
+ // this usecase can happen when two subsequent calls to committedUpTo concludes that the same rounds range need to be
+ // flush, without the commitRound have a chance of committing these rounds.
+ if offset == 0 {
+ au.accountsMu.RUnlock()
+ return
+ }
+
dbRound = au.dbRound
newBase := basics.Round(offset) + dbRound
@@ -1895,11 +2098,17 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
deltas := make([]ledgercore.AccountDeltas, offset, offset)
creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset, offset)
roundTotals := make([]ledgercore.AccountTotals, offset+1, offset+1)
- protos := make([]config.ConsensusParams, offset+1, offset+1)
copy(deltas, au.deltas[:offset])
copy(creatableDeltas, au.creatableDeltas[:offset])
copy(roundTotals, au.roundTotals[:offset+1])
- copy(protos, au.protos[:offset+1])
+
+ // verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that.
+ if au.versions[1] != au.versions[offset] {
+ au.accountsMu.RUnlock()
+ au.log.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
+ return
+ }
+ consensusVersion := au.versions[1]
var committedRoundDigest crypto.Digest
@@ -1932,15 +2141,18 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
start := time.Now()
ledgerCommitroundCount.Inc(nil)
var updatedPersistedAccounts []persistedAccountData
+ if updateStats {
+ stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano())
+ }
err := au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
treeTargetRound := basics.Round(0)
if au.catchpointInterval > 0 {
- mc, err0 := makeMerkleCommitter(tx, false)
+ mc, err0 := MakeMerkleCommitter(tx, false)
if err0 != nil {
return err0
}
if au.balancesTrie == nil {
- trie, err := merkletrie.MakeTrie(mc, trieMemoryConfig)
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
if err != nil {
au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
return err
@@ -1952,21 +2164,41 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
treeTargetRound = dbRound + basics.Round(offset)
}
+ db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
+
+ if updateStats {
+ stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano())
+ }
+
err = compactDeltas.accountsLoadOld(tx)
if err != nil {
return err
}
- err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], protos[1:offset+1])
+ if updateStats {
+ stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - stats.OldAccountPreloadDuration
+ }
+
+ err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], config.Consensus[consensusVersion])
if err != nil {
return err
}
+ if updateStats {
+ stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
+ }
+
err = au.accountsUpdateBalances(compactDeltas)
if err != nil {
return err
}
+ if updateStats {
+ now := time.Duration(time.Now().UnixNano())
+ stats.MerkleTrieUpdateDuration = now - stats.MerkleTrieUpdateDuration
+ stats.AccountsWritingDuration = now
+ }
+
// the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
// so that we can update the base account back.
updatedPersistedAccounts, err = accountsNewRound(tx, compactDeltas, compactCreatableDeltas, genesisProto, dbRound+basics.Round(offset))
@@ -1974,6 +2206,10 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
return err
}
+ if updateStats {
+ stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano()) - stats.AccountsWritingDuration
+ }
+
err = updateAccountsRound(tx, dbRound+basics.Round(offset), treeTargetRound)
if err != nil {
return err
@@ -1994,6 +2230,10 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
return
}
+ if updateStats {
+ stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) - stats.DatabaseCommitDuration - stats.AccountsWritingDuration - stats.MerkleTrieUpdateDuration - stats.OldAccountPreloadDuration
+ }
+
if isCatchpointRound {
catchpointLabel, err = au.accountsCreateCatchpointLabel(dbRound+basics.Round(offset)+lookback, roundTotals[offset], committedRoundDigest, trieBalancesHash)
if err != nil {
@@ -2012,6 +2252,9 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
}
updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime)
+ if updateStats {
+ stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
+ }
au.accountsMu.Lock()
// Drop reference counts to modified accounts, and evict them
// from in-memory cache when no references remain.
@@ -2057,13 +2300,18 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
au.deltas = au.deltas[offset:]
au.deltasAccum = au.deltasAccum[offset:]
au.roundDigest = au.roundDigest[offset:]
- au.protos = au.protos[offset:]
+ au.versions = au.versions[offset:]
au.roundTotals = au.roundTotals[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
au.dbRound = newBase
au.lastFlushTime = flushTime
au.accountsMu.Unlock()
+
+ if updateStats {
+ stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) - stats.MemoryUpdatesDuration
+ }
+
au.accountsReadCond.Broadcast()
if isCatchpointRound && au.archivalLedger && catchpointLabel != "" {
@@ -2072,6 +2320,18 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
au.generateCatchpoint(basics.Round(offset)+dbRound+lookback, catchpointLabel, committedRoundDigest, updatingBalancesDuration)
}
+ // log telemetry event
+ if updateStats {
+ stats.StartRound = uint64(dbRound)
+ stats.RoundsCount = offset
+ stats.UpdatedAccountsCount = uint64(len(updatedPersistedAccounts))
+ stats.UpdatedCreatablesCount = uint64(len(compactCreatableDeltas))
+
+ var details struct {
+ }
+ au.log.Metrics(telemetryspec.Accounts, stats, details)
+ }
+
}
// compactCreatableDeltas takes an array of creatables map deltas ( one array entry per round ), and compact the array into a single
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 5d4028f8c..9657d9ce0 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -372,7 +372,7 @@ func TestAcctUpdates(t *testing.T) {
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = protocol.ConsensusCurrentVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len())
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
au.newBlock(blk, delta)
@@ -455,7 +455,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = protocol.ConsensusCurrentVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len())
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
au.newBlock(blk, delta)
accts = append(accts, totals)
@@ -544,7 +544,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = protocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len())
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
au.newBlock(blk, delta)
accts = append(accts, totals)
@@ -671,7 +671,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = testProtocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len())
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
au.newBlock(blk, delta)
accts = append(accts, totals)
@@ -832,7 +832,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = testProtocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, len(updates))
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, len(updates), 0)
for addr, ad := range updates {
delta.Accts.Upsert(addr, ad)
}
@@ -1498,7 +1498,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
}
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = testProtocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len())
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
au.newBlock(blk, delta)
@@ -1545,3 +1545,378 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
}
}
}
+
+// TestCachesInitialization tests the functionality of the initializeCaches cache.
+func TestCachesInitialization(t *testing.T) {
+ protocolVersion := protocol.ConsensusCurrentVersion
+ proto := config.Consensus[protocolVersion]
+
+ initialRounds := uint64(1)
+
+ ml := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion)
+ ml.log.SetLevel(logging.Warn)
+ defer ml.Close()
+
+ accountsCount := 5
+ accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ rewardsLevels := []uint64{0}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ au := &accountUpdates{}
+ au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
+ err := au.loadFromDisk(ml)
+ require.NoError(t, err)
+
+ // cover initialRounds genesis blocks
+ rewardLevel := uint64(0)
+ for i := 1; i < int(initialRounds); i++ {
+ accts = append(accts, accts[0])
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+
+ recoveredLedgerRound := basics.Round(initialRounds + initializeCachesRoundFlushInterval + proto.MaxBalLookback + 1)
+
+ for i := basics.Round(initialRounds); i <= recoveredLedgerRound; i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ accountChanges := 2
+
+ updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = protocolVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ au.newBlock(blk, delta)
+ au.committedUpTo(basics.Round(i))
+ au.waitAccountsWriting()
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+ au.close()
+
+ // create another mocked ledger, but this time with a fresh new tracker database.
+ ml2 := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion)
+ ml2.log.SetLevel(logging.Warn)
+ defer ml2.Close()
+
+ // and "fix" it to contain the blocks and deltas from before.
+ ml2.blocks = ml.blocks
+ ml2.deltas = ml.deltas
+
+ au = &accountUpdates{}
+ au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
+ err = au.loadFromDisk(ml2)
+ require.NoError(t, err)
+ defer au.close()
+
+ // make sure the deltas array end up containing only the most recent 320 rounds.
+ require.Equal(t, int(proto.MaxBalLookback), len(au.deltas))
+ require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.dbRound)
+}
+
+// TestSplittingConsensusVersionCommits tests that a sequence of commits that spans over multiple consensus versions works correctly.
+func TestSplittingConsensusVersionCommits(t *testing.T) {
+ initProtocolVersion := protocol.ConsensusV20
+ initialProtoParams := config.Consensus[initProtocolVersion]
+
+ initialRounds := uint64(1)
+
+ ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion)
+ ml.log.SetLevel(logging.Warn)
+ defer ml.Close()
+
+ accountsCount := 5
+ accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ rewardsLevels := []uint64{0}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ au := &accountUpdates{}
+ au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0])
+ err := au.loadFromDisk(ml)
+ require.NoError(t, err)
+ defer au.close()
+
+ // cover initialRounds genesis blocks
+ rewardLevel := uint64(0)
+ for i := 1; i < int(initialRounds); i++ {
+ accts = append(accts, accts[0])
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+
+ extraRounds := uint64(39)
+
+ // write the extraRounds rounds so that we will fill up the queue.
+ for i := basics.Round(initialRounds); i < basics.Round(initialRounds+extraRounds); i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ accountChanges := 2
+
+ updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = initProtocolVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ au.newBlock(blk, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+
+ newVersionBlocksCount := uint64(47)
+ newVersion := protocol.ConsensusV21
+ // add 47 more rounds that contains blocks using a newer consensus version, and stuff it with MaxBalLookback
+ lastRoundToWrite := basics.Round(initialRounds + initialProtoParams.MaxBalLookback + extraRounds + newVersionBlocksCount)
+ for i := basics.Round(initialRounds + extraRounds); i < lastRoundToWrite; i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ accountChanges := 2
+
+ updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = newVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ au.newBlock(blk, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+	// now, commit and verify that the committedUpTo method broke the range correctly.
+ au.committedUpTo(lastRoundToWrite)
+ au.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound)
+
+}
+
+// TestSplittingConsensusVersionCommitsBoundry tests that a sequence of commits that spans over multiple consensus versions works correctly, and
+// in particular, complements TestSplittingConsensusVersionCommits by testing the commit boundary.
+func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
+ initProtocolVersion := protocol.ConsensusV20
+ initialProtoParams := config.Consensus[initProtocolVersion]
+
+ initialRounds := uint64(1)
+
+ ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion)
+ ml.log.SetLevel(logging.Warn)
+ defer ml.Close()
+
+ accountsCount := 5
+ accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ rewardsLevels := []uint64{0}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ au := &accountUpdates{}
+ au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0])
+ err := au.loadFromDisk(ml)
+ require.NoError(t, err)
+ defer au.close()
+
+ // cover initialRounds genesis blocks
+ rewardLevel := uint64(0)
+ for i := 1; i < int(initialRounds); i++ {
+ accts = append(accts, accts[0])
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+
+ extraRounds := uint64(39)
+
+ // write extraRounds rounds so that we will fill up the queue.
+ for i := basics.Round(initialRounds); i < basics.Round(initialRounds+extraRounds); i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ accountChanges := 2
+
+ updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = initProtocolVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ au.newBlock(blk, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+
+ newVersion := protocol.ConsensusV21
+ // add MaxBalLookback-extraRounds more rounds that contains blocks using a newer consensus version.
+ endOfFirstNewProtocolSegment := basics.Round(initialRounds + extraRounds + initialProtoParams.MaxBalLookback)
+ for i := basics.Round(initialRounds + extraRounds); i <= endOfFirstNewProtocolSegment; i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ accountChanges := 2
+
+ updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = newVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ au.newBlock(blk, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+	// now, commit and verify that the committedUpTo method broke the range correctly.
+ au.committedUpTo(endOfFirstNewProtocolSegment)
+ au.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound)
+
+ // write additional extraRounds elements and verify these can be flushed.
+ for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+initialProtoParams.MaxBalLookback); i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ accountChanges := 2
+
+ updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = newVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ au.newBlock(blk, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
+ au.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds))
+ au.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.dbRound)
+}
+
+// TestConsecutiveVersion tests the consecutiveVersion method correctness.
+func TestConsecutiveVersion(t *testing.T) {
+ var au accountUpdates
+ au.versions = []protocol.ConsensusVersion{
+ protocol.ConsensusV19,
+ protocol.ConsensusV20,
+ protocol.ConsensusV20,
+ protocol.ConsensusV20,
+ protocol.ConsensusV20,
+ protocol.ConsensusV21,
+ protocol.ConsensusV21,
+ protocol.ConsensusV21,
+ protocol.ConsensusV21,
+ protocol.ConsensusV21,
+ protocol.ConsensusV21,
+ protocol.ConsensusV22,
+ }
+ for offset := uint64(1); offset < uint64(len(au.versions)); offset++ {
+ co := au.consecutiveVersion(offset)
+ require.Equal(t, au.versions[1], au.versions[co])
+ }
+ au.versions = []protocol.ConsensusVersion{
+ protocol.ConsensusV19,
+ protocol.ConsensusV20,
+ protocol.ConsensusV21,
+ }
+}
diff --git a/ledger/appcow.go b/ledger/appcow.go
index f0904a6cc..53cb3a91f 100644
--- a/ledger/appcow.go
+++ b/ledger/appcow.go
@@ -505,7 +505,7 @@ func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.
}
d := sdelta.kvCow.serialize()
- // noEmptyDeltas restricts prodicing empty local deltas in general
+ // noEmptyDeltas restricts producing empty local deltas in general
// but allows it for a period of time when a buggy version was live
noEmptyDeltas := cb.proto.NoEmptyLocalDeltas || (cb.mods.Hdr.CurrentProtocol == protocol.ConsensusV24) && (cb.mods.Hdr.NextProtocol != protocol.ConsensusV26)
if !noEmptyDeltas || len(d) != 0 {
diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go
index 058377d02..de0cbdc7d 100644
--- a/ledger/appcow_test.go
+++ b/ledger/appcow_test.go
@@ -97,7 +97,7 @@ type modsData struct {
func getCow(creatables []modsData) *roundCowState {
cs := &roundCowState{
- mods: ledgercore.MakeStateDelta(&bookkeeping.BlockHeader{}, 0, 2),
+ mods: ledgercore.MakeStateDelta(&bookkeeping.BlockHeader{}, 0, 2, 0),
proto: config.Consensus[protocol.ConsensusCurrentVersion],
}
for _, e := range creatables {
diff --git a/ledger/applications.go b/ledger/applications.go
index 5f1d9bca4..54d87d634 100644
--- a/ledger/applications.go
+++ b/ledger/applications.go
@@ -76,8 +76,7 @@ func (al *logicLedger) Balance(addr basics.Address) (res basics.MicroAlgos, err
}
func (al *logicLedger) MinBalance(addr basics.Address, proto *config.ConsensusParams) (res basics.MicroAlgos, err error) {
- // Fetch record with pending rewards applied
- record, err := al.cow.Get(addr, true)
+ record, err := al.cow.Get(addr, false) // pending rewards unneeded
if err != nil {
return
}
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index 87d277ffd..5465cdccc 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -400,7 +400,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -621,7 +621,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -866,7 +866,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1013,7 +1013,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1200,7 +1200,7 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
// explicitly trigger compatibility mode
proto := config.Consensus[protocol.ConsensusV24]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusV24)
+ genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusV24, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
diff --git a/ledger/apply/application.go b/ledger/apply/application.go
index e3665e1a7..261cb379a 100644
--- a/ledger/apply/application.go
+++ b/ledger/apply/application.go
@@ -274,25 +274,17 @@ func closeOutApplication(balances Balances, sender basics.Address, appIdx basics
return nil
}
-func checkPrograms(ac *transactions.ApplicationCallTxnFields, evalParams *logic.EvalParams, maxCost int) error {
- cost, err := logic.CheckStateful(ac.ApprovalProgram, *evalParams)
+func checkPrograms(ac *transactions.ApplicationCallTxnFields, evalParams *logic.EvalParams) error {
+ err := logic.CheckStateful(ac.ApprovalProgram, *evalParams)
if err != nil {
return fmt.Errorf("check failed on ApprovalProgram: %v", err)
}
- if cost > maxCost {
- return fmt.Errorf("ApprovalProgram too resource intensive. Cost is %d, max %d", cost, maxCost)
- }
-
- cost, err = logic.CheckStateful(ac.ClearStateProgram, *evalParams)
+ err = logic.CheckStateful(ac.ClearStateProgram, *evalParams)
if err != nil {
return fmt.Errorf("check failed on ClearStateProgram: %v", err)
}
- if cost > maxCost {
- return fmt.Errorf("ClearStateProgram too resource intensive. Cost is %d, max %d", cost, maxCost)
- }
-
return nil
}
@@ -344,8 +336,7 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
// If this txn is going to set new programs (either for creation or
// update), check that the programs are valid and not too expensive
if ac.ApplicationID == 0 || ac.OnCompletion == transactions.UpdateApplicationOC {
- maxCost := balances.ConsensusParams().MaxAppProgramCost
- err = checkPrograms(&ac, evalParams, maxCost)
+ err = checkPrograms(&ac, evalParams)
if err != nil {
return err
}
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index 23f1a4909..5ce8b07ed 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -349,32 +349,36 @@ func TestAppCallCheckPrograms(t *testing.T) {
var ac transactions.ApplicationCallTxnFields
var ep logic.EvalParams
- proto := config.Consensus[protocol.ConsensusFuture]
+ // This check is for static costs. v26 is last with static cost checking
+ proto := config.Consensus[protocol.ConsensusV26]
ep.Proto = &proto
- err := checkPrograms(&ac, &ep, 1)
+ proto.MaxAppProgramCost = 1
+ err := checkPrograms(&ac, &ep)
a.Error(err)
a.Contains(err.Error(), "check failed on ApprovalProgram")
program := []byte{2, 0x20, 1, 1, 0x22} // version, intcb, int 1
ac.ApprovalProgram = program
- err = checkPrograms(&ac, &ep, 1)
- a.Error(err)
- a.Contains(err.Error(), "ApprovalProgram too resource intensive")
+ ac.ClearStateProgram = program
- err = checkPrograms(&ac, &ep, 10)
+ err = checkPrograms(&ac, &ep)
a.Error(err)
- a.Contains(err.Error(), "check failed on ClearStateProgram")
+ a.Contains(err.Error(), "check failed on ApprovalProgram")
+
+ proto.MaxAppProgramCost = 10
+ err = checkPrograms(&ac, &ep)
+ a.NoError(err)
ac.ClearStateProgram = append(ac.ClearStateProgram, program...)
ac.ClearStateProgram = append(ac.ClearStateProgram, program...)
ac.ClearStateProgram = append(ac.ClearStateProgram, program...)
- err = checkPrograms(&ac, &ep, 10)
+ err = checkPrograms(&ac, &ep)
a.Error(err)
- a.Contains(err.Error(), "ClearStateProgram too resource intensive")
+ a.Contains(err.Error(), "check failed on ClearStateProgram")
ac.ClearStateProgram = program
- err = checkPrograms(&ac, &ep, 10)
+ err = checkPrograms(&ac, &ep)
a.NoError(err)
}
diff --git a/ledger/apply/keyreg.go b/ledger/apply/keyreg.go
index 3926f1ad8..532bfac51 100644
--- a/ledger/apply/keyreg.go
+++ b/ledger/apply/keyreg.go
@@ -17,6 +17,7 @@
package apply
import (
+ "errors"
"fmt"
"github.com/algorand/go-algorand/crypto"
@@ -24,8 +25,11 @@ import (
"github.com/algorand/go-algorand/data/transactions"
)
+var errKeyregGoingOnlineExpiredParticipationKey = errors.New("transaction tries to mark an account as online with last voting round in the past")
+var errKeyregGoingOnlineFirstVotingInFuture = errors.New("transaction tries to mark an account as online with first voting round beyond the next voting round")
+
// Keyreg applies a KeyRegistration transaction using the Balances interface.
-func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, balances Balances, spec transactions.SpecialAddresses, ad *transactions.ApplyData) error {
+func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, balances Balances, spec transactions.SpecialAddresses, ad *transactions.ApplyData, round basics.Round) error {
if header.Sender == spec.FeeSink {
return fmt.Errorf("cannot register participation key for fee sink's address %v ", header.Sender)
}
@@ -59,6 +63,15 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal
record.VoteLastValid = 0
record.VoteKeyDilution = 0
} else {
+
+ if balances.ConsensusParams().EnableKeyregCoherencyCheck {
+ if keyreg.VoteLast <= round {
+ return errKeyregGoingOnlineExpiredParticipationKey
+ }
+ if keyreg.VoteFirst > round+1 {
+ return errKeyregGoingOnlineFirstVotingInFuture
+ }
+ }
record.Status = basics.Online
record.VoteFirstValid = keyreg.VoteFirst
record.VoteLastValid = keyreg.VoteLast
diff --git a/ledger/apply/keyreg_test.go b/ledger/apply/keyreg_test.go
index 044d587a5..2be6a6b37 100644
--- a/ledger/apply/keyreg_test.go
+++ b/ledger/apply/keyreg_test.go
@@ -44,7 +44,8 @@ func (balances keyregTestBalances) GetCreator(cidx basics.CreatableIndex, ctype
return basics.Address{}, true, nil
}
-func (balances keyregTestBalances) Put(basics.Address, basics.AccountData) error {
+func (balances keyregTestBalances) Put(addr basics.Address, ad basics.AccountData) error {
+ balances.addrs[addr] = ad
return nil
}
@@ -95,11 +96,11 @@ func TestKeyregApply(t *testing.T) {
SelectionPK: vrfSecrets.PK,
},
}
- err := Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil)
+ err := Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
require.NoError(t, err)
tx.Sender = feeSink
- err = Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil)
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
require.Error(t, err)
tx.Sender = src
@@ -108,19 +109,60 @@ func TestKeyregApply(t *testing.T) {
// Going from offline to online should be okay
mockBal.addrs[src] = basics.AccountData{Status: basics.Offline}
- err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil)
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
require.NoError(t, err)
// Going from online to nonparticipatory should be okay, if the protocol supports that
if mockBal.ConsensusParams().SupportBecomeNonParticipatingTransactions {
tx.KeyregTxnFields = transactions.KeyregTxnFields{}
tx.KeyregTxnFields.Nonparticipation = true
- err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil)
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
require.NoError(t, err)
// Nonparticipatory accounts should not be able to change status
mockBal.addrs[src] = basics.AccountData{Status: basics.NotParticipating}
- err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil)
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
require.Error(t, err)
}
+
+ mockBal.version = "future"
+ if mockBal.ConsensusParams().EnableKeyregCoherencyCheck {
+ tx = transactions.Transaction{
+ Type: protocol.KeyRegistrationTx,
+ Header: transactions.Header{
+ Sender: src,
+ Fee: basics.MicroAlgos{Raw: 1},
+ FirstValid: basics.Round(1000),
+ LastValid: basics.Round(1200),
+ },
+ KeyregTxnFields: transactions.KeyregTxnFields{
+ VotePK: crypto.OneTimeSignatureVerifier(secretParticipation.SignatureVerifier),
+ SelectionPK: vrfSecrets.PK,
+ VoteKeyDilution: 1000,
+ VoteFirst: 500,
+ VoteLast: 1000,
+ },
+ }
+ mockBal.addrs[src] = basics.AccountData{Status: basics.Offline}
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(999))
+ require.NoError(t, err)
+
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1000))
+ require.Equal(t, errKeyregGoingOnlineExpiredParticipationKey, err)
+
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1001))
+ require.Equal(t, errKeyregGoingOnlineExpiredParticipationKey, err)
+
+ tx.KeyregTxnFields.VoteFirst = basics.Round(1100)
+ tx.KeyregTxnFields.VoteLast = basics.Round(1200)
+
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1098))
+ require.Equal(t, errKeyregGoingOnlineFirstVotingInFuture, err)
+
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1099))
+ require.NoError(t, err)
+
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1100))
+ require.NoError(t, err)
+ }
}
diff --git a/ledger/blockqueue.go b/ledger/blockqueue.go
index e23c98e72..25df6043a 100644
--- a/ledger/blockqueue.go
+++ b/ledger/blockqueue.go
@@ -173,10 +173,10 @@ func (bq *blockQueue) latest() basics.Round {
return bq.lastCommitted + basics.Round(len(bq.q))
}
-func (bq *blockQueue) latestCommitted() basics.Round {
+func (bq *blockQueue) latestCommitted() (basics.Round, basics.Round) {
bq.mu.Lock()
defer bq.mu.Unlock()
- return bq.lastCommitted
+ return bq.lastCommitted, bq.lastCommitted + basics.Round(len(bq.q))
}
func (bq *blockQueue) putBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 7491297e0..dc0712857 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -466,19 +466,19 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
uncommitedHashesCount := 0
keepWriting := true
hashesWritten := uint64(0)
- var mc *merkleCommitter
+ var mc *MerkleCommitter
if progressUpdates != nil {
progressUpdates(hashesWritten)
}
err := wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) {
// create the merkle trie for the balances
- mc, err = makeMerkleCommitter(tx, true)
+ mc, err = MakeMerkleCommitter(tx, true)
if err != nil {
return
}
- trie, err = merkletrie.MakeTrie(mc, trieMemoryConfig)
+ trie, err = merkletrie.MakeTrie(mc, TrieMemoryConfig)
return err
})
if err != nil {
@@ -501,7 +501,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
}
err = rdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) {
- mc, err = makeMerkleCommitter(tx, true)
+ mc, err = MakeMerkleCommitter(tx, true)
if err != nil {
return
}
@@ -528,7 +528,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
err = wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) {
// set a long 30-second window for the evict before warning is generated.
db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second))
- mc, err = makeMerkleCommitter(tx, true)
+ mc, err = MakeMerkleCommitter(tx, true)
if err != nil {
return
}
@@ -557,7 +557,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
err = wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) {
// set a long 30-second window for the evict before warning is generated.
db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second))
- mc, err = makeMerkleCommitter(tx, true)
+ mc, err = MakeMerkleCommitter(tx, true)
if err != nil {
return
}
@@ -618,12 +618,12 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
ledgerVerifycatchpointCount.Inc(nil)
err = rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
// create the merkle trie for the balances
- mc, err0 := makeMerkleCommitter(tx, true)
+ mc, err0 := MakeMerkleCommitter(tx, true)
if err0 != nil {
return fmt.Errorf("unable to make MerkleCommitter: %v", err0)
}
var trie *merkletrie.Trie
- trie, err = merkletrie.MakeTrie(mc, trieMemoryConfig)
+ trie, err = merkletrie.MakeTrie(mc, TrieMemoryConfig)
if err != nil {
return fmt.Errorf("unable to make trie: %v", err)
}
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index a0f28b37d..96d7e1b5f 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -36,7 +36,7 @@ import (
)
func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion)
+ genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
const inMem = false
log := logging.TestingLog(b)
cfg := config.GetDefaultLocal()
diff --git a/ledger/compactcert.go b/ledger/compactcert.go
index 714a053ab..9d2f2d66f 100644
--- a/ledger/compactcert.go
+++ b/ledger/compactcert.go
@@ -33,7 +33,9 @@ import (
// votersHdr specifies the block that contains the Merkle commitment of
// the voters for this compact cert (and thus the compact cert is for
// votersHdr.Round() + CompactCertRounds).
-func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round) uint64 {
+//
+// logger must not be nil; use at least logging.Base()
+func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 {
proto := config.Consensus[votersHdr.CurrentProtocol]
certRound := votersHdr.Round + basics.Round(proto.CompactCertRounds)
total := votersHdr.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal
@@ -71,7 +73,7 @@ func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid b
provenWeight, overflowed := basics.Muldiv(total.ToUint64(), uint64(proto.CompactCertWeightThreshold), 1<<32)
if overflowed || provenWeight > total.ToUint64() {
// Shouldn't happen, but a safe fallback is to accept a larger cert.
- logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight",
+ logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight",
total, proto.CompactCertRounds, certRound, firstValid)
return 0
}
@@ -83,7 +85,7 @@ func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid b
scaledWeight, overflowed := basics.Muldiv(total.ToUint64()-provenWeight, proto.CompactCertRounds/2-uint64(offset), proto.CompactCertRounds/2)
if overflowed {
// Shouldn't happen, but a safe fallback is to accept a larger cert.
- logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow scaledWeight",
+ logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow scaledWeight",
total, proto.CompactCertRounds, certRound, firstValid)
return 0
}
@@ -91,7 +93,7 @@ func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid b
w, overflowed := basics.OAdd(provenWeight, scaledWeight)
if overflowed {
// Shouldn't happen, but a safe fallback is to accept a larger cert.
- logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)",
+ logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)",
total, proto.CompactCertRounds, certRound, firstValid, provenWeight, scaledWeight)
return 0
}
@@ -161,7 +163,7 @@ func validateCompactCert(certHdr bookkeeping.BlockHeader, cert compactcert.Cert,
nextCertRnd, certHdr.Round, votersRound)
}
- acceptableWeight := AcceptableCompactCertWeight(votersHdr, atRound)
+ acceptableWeight := AcceptableCompactCertWeight(votersHdr, atRound, logging.Base())
if cert.SignedWeight < acceptableWeight {
return fmt.Errorf("insufficient weight at %d: %d < %d",
atRound, cert.SignedWeight, acceptableWeight)
diff --git a/ledger/compactcert_test.go b/ledger/compactcert_test.go
new file mode 100644
index 000000000..6c1c22ad4
--- /dev/null
+++ b/ledger/compactcert_test.go
@@ -0,0 +1,168 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+func TestValidateCompactCert(t *testing.T) {
+ var certHdr bookkeeping.BlockHeader
+ var cert compactcert.Cert
+ var votersHdr bookkeeping.BlockHeader
+ var nextCertRnd basics.Round
+ var atRound basics.Round
+
+ // will definitely fail with nothing set up
+ err := validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ t.Log(err)
+ require.NotNil(t, err)
+
+ certHdr.CurrentProtocol = "TestValidateCompactCert"
+ certHdr.Round = 1
+ proto := config.Consensus[certHdr.CurrentProtocol]
+ proto.CompactCertRounds = 2
+ config.Consensus[certHdr.CurrentProtocol] = proto
+
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ certHdr.Round = 4
+ votersHdr.Round = 4
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ votersHdr.Round = 2
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ nextCertRnd = 4
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ votersHdr.CurrentProtocol = certHdr.CurrentProtocol
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ votersHdr.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState)
+ cc := votersHdr.CompactCert[protocol.CompactCertBasic]
+ cc.CompactCertVotersTotal.Raw = 100
+ votersHdr.CompactCert[protocol.CompactCertBasic] = cc
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ cert.SignedWeight = 101
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ // still err, but a different err case to cover
+ t.Log(err)
+ require.NotNil(t, err)
+
+ // Above cases leave validateCompactCert() with 100% coverage.
+ // crypto/compactcert.Verify has its own tests
+}
+
+func TestAcceptableCompactCertWeight(t *testing.T) {
+ var votersHdr bookkeeping.BlockHeader
+ var firstValid basics.Round
+ logger := logging.TestingLog(t)
+
+ votersHdr.CurrentProtocol = "TestAcceptableCompactCertWeight"
+ proto := config.Consensus[votersHdr.CurrentProtocol]
+ proto.CompactCertRounds = 2
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ out := AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+ require.Equal(t, uint64(0), out)
+
+ votersHdr.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState)
+ cc := votersHdr.CompactCert[protocol.CompactCertBasic]
+ cc.CompactCertVotersTotal.Raw = 100
+ votersHdr.CompactCert[protocol.CompactCertBasic] = cc
+ out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+ require.Equal(t, uint64(100), out)
+
+ // this should exercise the second return case
+ firstValid = basics.Round(5)
+ out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+ require.Equal(t, uint64(100), out)
+
+ firstValid = basics.Round(6)
+ proto.CompactCertWeightThreshold = 999999999
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+ require.Equal(t, uint64(0x17), out)
+
+ proto.CompactCertRounds = 10000
+ votersHdr.Round = 10000
+ firstValid = basics.Round(29000)
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ cc.CompactCertVotersTotal.Raw = 0x7fffffffffffffff
+ votersHdr.CompactCert[protocol.CompactCertBasic] = cc
+ proto.CompactCertWeightThreshold = 0x7fffffff
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+ require.Equal(t, uint64(0x4cd35a85213a92a2), out)
+
+ // Covers everything except "overflow that shouldn't happen" branches
+}
+
+func TestCompactCertParams(t *testing.T) {
+ var votersHdr bookkeeping.BlockHeader
+ var hdr bookkeeping.BlockHeader
+
+ res, err := CompactCertParams(votersHdr, hdr)
+ require.Error(t, err) // not enabled
+
+ votersHdr.CurrentProtocol = "TestCompactCertParams"
+ proto := config.Consensus[votersHdr.CurrentProtocol]
+ proto.CompactCertRounds = 2
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ votersHdr.Round = 1
+ res, err = CompactCertParams(votersHdr, hdr)
+ require.Error(t, err) // wrong round
+
+ votersHdr.Round = 2
+ hdr.Round = 3
+ res, err = CompactCertParams(votersHdr, hdr)
+ require.Error(t, err) // wrong round
+
+ hdr.Round = 4
+ res, err = CompactCertParams(votersHdr, hdr)
+ require.Equal(t, hdr.Round+1, res.SigRound)
+
+ // Covers all cases except overflow
+}
diff --git a/ledger/cow.go b/ledger/cow.go
index ab4e87c51..68d794341 100644
--- a/ledger/cow.go
+++ b/ledger/cow.go
@@ -75,7 +75,7 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, prevTimest
lookupParent: b,
commitParent: nil,
proto: config.Consensus[hdr.CurrentProtocol],
- mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint),
+ mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint, 0),
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
}
@@ -218,7 +218,7 @@ func (cb *roundCowState) child(hint int) *roundCowState {
lookupParent: cb,
commitParent: cb,
proto: cb.proto,
- mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, hint),
+ mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, hint, cb.mods.CompactCertNext),
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
}
diff --git a/ledger/cow_test.go b/ledger/cow_test.go
index a4df0f1c1..b09c479e8 100644
--- a/ledger/cow_test.go
+++ b/ledger/cow_test.go
@@ -29,6 +29,8 @@ import (
type mockLedger struct {
balanceMap map[basics.Address]basics.AccountData
+ blocks map[basics.Round]bookkeeping.BlockHeader
+ blockErr map[basics.Round]error
}
func (ml *mockLedger) lookup(addr basics.Address) (basics.AccountData, error) {
@@ -75,8 +77,13 @@ func (ml *mockLedger) compactCertNext() basics.Round {
return 0
}
-func (ml *mockLedger) blockHdr(_ basics.Round) (bookkeeping.BlockHeader, error) {
- return bookkeeping.BlockHeader{}, nil
+func (ml *mockLedger) blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ err, hit := ml.blockErr[rnd]
+ if hit {
+ return bookkeeping.BlockHeader{}, err
+ }
+ hdr := ml.blocks[rnd] // default struct is fine if nothing found
+ return hdr, nil
}
func checkCow(t *testing.T, cow *roundCowState, accts map[basics.Address]basics.AccountData) {
diff --git a/ledger/eval.go b/ledger/eval.go
index eb9596c9d..100d17df0 100644
--- a/ledger/eval.go
+++ b/ledger/eval.go
@@ -22,8 +22,6 @@ import (
"fmt"
"sync"
- "github.com/algorand/go-deadlock"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/compactcert"
@@ -48,6 +46,11 @@ var ErrNoSpace = errors.New("block does not have space for transaction")
// many transactions in a block.
const maxPaysetHint = 20000
+// asyncAccountLoadingThreadCount controls how many go routines would be used
+// to load the account data before the eval() start processing individual
+// transaction group.
+const asyncAccountLoadingThreadCount = 4
+
type roundCowBase struct {
l ledgerForCowBase
@@ -73,9 +76,6 @@ type roundCowBase struct {
// are beyond the scope of this cache.
// The account data store here is always the account data without the rewards.
accounts map[basics.Address]basics.AccountData
-
- // accountsMu is the accounts read-write mutex, used to syncronize the access ot the accounts map.
- accountsMu deadlock.RWMutex
}
func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
@@ -86,18 +86,13 @@ func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.Creat
// first, and if it cannot find it there, it would defer to the underlaying implementation.
// note that errors in accounts data retrivals are not cached as these typically cause the transaction evaluation to fail.
func (x *roundCowBase) lookup(addr basics.Address) (basics.AccountData, error) {
- x.accountsMu.RLock()
if accountData, found := x.accounts[addr]; found {
- x.accountsMu.RUnlock()
return accountData, nil
}
- x.accountsMu.RUnlock()
accountData, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
if err == nil {
- x.accountsMu.Lock()
x.accounts[addr] = accountData
- x.accountsMu.Unlock()
}
return accountData, err
}
@@ -309,7 +304,7 @@ func (cs *roundCowState) ConsensusParams() config.ConsensusParams {
return cs.proto
}
-func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.CompactCertType, cert compactcert.Cert, atRound basics.Round) error {
+func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.CompactCertType, cert compactcert.Cert, atRound basics.Round, validate bool) error {
if certType != protocol.CompactCertBasic {
return fmt.Errorf("compact cert type %d not supported", certType)
}
@@ -322,15 +317,18 @@ func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.Com
}
proto := config.Consensus[certHdr.CurrentProtocol]
- votersRnd := certRnd.SubSaturate(basics.Round(proto.CompactCertRounds))
- votersHdr, err := cs.blockHdr(votersRnd)
- if err != nil {
- return err
- }
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- if err != nil {
- return err
+ if validate {
+ votersRnd := certRnd.SubSaturate(basics.Round(proto.CompactCertRounds))
+ votersHdr, err := cs.blockHdr(votersRnd)
+ if err != nil {
+ return err
+ }
+
+ err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ if err != nil {
+ return err
+ }
}
cs.setCompactCertNext(certRnd + basics.Round(proto.CompactCertRounds))
@@ -770,6 +768,49 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit
return nil
}
+// Check the minimum balance requirement for the modified accounts in `cow`.
+func (eval *BlockEvaluator) checkMinBalance(cow *roundCowState) error {
+ rewardlvl := cow.rewardsLevel()
+ for _, addr := range cow.modifiedAccounts() {
+ // Skip FeeSink, RewardsPool, and CompactCertSender MinBalance checks here.
+ // There's only a few accounts, so space isn't an issue, and we don't
+ // expect them to have low balances, but if they do, it may cause
+ // surprises.
+ if addr == eval.block.FeeSink || addr == eval.block.RewardsPool ||
+ addr == transactions.CompactCertSender {
+ continue
+ }
+
+ data, err := cow.lookup(addr)
+ if err != nil {
+ return err
+ }
+
+ // It's always OK to have the account move to an empty state,
+ // because the accounts DB can delete it. Otherwise, we will
+ // enforce MinBalance.
+ if data.IsZero() {
+ continue
+ }
+
+ dataNew := data.WithUpdatedRewards(eval.proto, rewardlvl)
+ effectiveMinBalance := dataNew.MinBalance(&eval.proto)
+ if dataNew.MicroAlgos.Raw < effectiveMinBalance.Raw {
+ return fmt.Errorf("account %v balance %d below min %d (%d assets)",
+ addr, dataNew.MicroAlgos.Raw, effectiveMinBalance.Raw, len(dataNew.Assets))
+ }
+
+ // Check if we have exceeded the maximum minimum balance
+ if eval.proto.MaximumMinimumBalance != 0 {
+ if effectiveMinBalance.Raw > eval.proto.MaximumMinimumBalance {
+ return fmt.Errorf("account %v would use too much space after this transaction. Minimum balance requirements would be %d (greater than max %d)", addr, effectiveMinBalance.Raw, eval.proto.MaximumMinimumBalance)
+ }
+ }
+ }
+
+ return nil
+}
+
// transaction tentatively executes a new transaction as part of this block evaluation.
// If the transaction cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
@@ -812,7 +853,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
}
// Apply the transaction, updating the cow balances
- applyData, err := applyTransaction(txn.Txn, cow, evalParams, spec, cow.txnCounter())
+ applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, spec, cow.txnCounter())
if err != nil {
return fmt.Errorf("transaction %v: %v", txid, err)
}
@@ -839,40 +880,13 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
// Check if any affected accounts dipped below MinBalance (unless they are
// completely zero, which means the account will be deleted.)
- rewardlvl := cow.rewardsLevel()
- for _, addr := range cow.modifiedAccounts() {
- // Skip FeeSink, RewardsPool, and CompactCertSender MinBalance checks here.
- // There's only a few accounts, so space isn't an issue, and we don't
- // expect them to have low balances, but if they do, it may cause
- // surprises.
- if addr == spec.FeeSink || addr == spec.RewardsPool || addr == transactions.CompactCertSender {
- continue
- }
-
- data, err := cow.lookup(addr)
+ // Only do those checks if we are validating or generating. It is useful to skip them
+ // if we cannot provide account data that contains enough information to
+ // compute the correct minimum balance (the case with indexer which does not store it).
+ if eval.validate || eval.generate {
+ err := eval.checkMinBalance(cow)
if err != nil {
- return err
- }
-
- // It's always OK to have the account move to an empty state,
- // because the accounts DB can delete it. Otherwise, we will
- // enforce MinBalance.
- if data.IsZero() {
- continue
- }
-
- dataNew := data.WithUpdatedRewards(eval.proto, rewardlvl)
- effectiveMinBalance := dataNew.MinBalance(&eval.proto)
- if dataNew.MicroAlgos.Raw < effectiveMinBalance.Raw {
- return fmt.Errorf("transaction %v: account %v balance %d below min %d (%d assets)",
- txid, addr, dataNew.MicroAlgos.Raw, effectiveMinBalance.Raw, len(dataNew.Assets))
- }
-
- // Check if we have exceeded the maximum minimum balance
- if eval.proto.MaximumMinimumBalance != 0 {
- if effectiveMinBalance.Raw > eval.proto.MaximumMinimumBalance {
- return fmt.Errorf("transaction %v: account %v would use too much space after this transaction. Minimum balance requirements would be %d (greater than max %d)", txid, addr, effectiveMinBalance.Raw, eval.proto.MaximumMinimumBalance)
- }
+ return fmt.Errorf("transaction %v: %w", txid, err)
}
}
@@ -883,7 +897,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
}
// applyTransaction changes the balances according to this transaction.
-func applyTransaction(tx transactions.Transaction, balances *roundCowState, evalParams *logic.EvalParams, spec transactions.SpecialAddresses, ctr uint64) (ad transactions.ApplyData, err error) {
+func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, balances *roundCowState, evalParams *logic.EvalParams, spec transactions.SpecialAddresses, ctr uint64) (ad transactions.ApplyData, err error) {
params := balances.ConsensusParams()
// move fee to pool
@@ -918,7 +932,7 @@ func applyTransaction(tx transactions.Transaction, balances *roundCowState, eval
err = apply.Payment(tx.PaymentTxnFields, tx.Header, balances, spec, &ad)
case protocol.KeyRegistrationTx:
- err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, balances, spec, &ad)
+ err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, balances, spec, &ad, balances.round())
case protocol.AssetConfigTx:
err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, balances, spec, &ad, ctr)
@@ -933,7 +947,14 @@ func applyTransaction(tx transactions.Transaction, balances *roundCowState, eval
err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, balances, &ad, evalParams, ctr)
case protocol.CompactCertTx:
- err = balances.compactCert(tx.CertRound, tx.CertType, tx.Cert, tx.Header.FirstValid)
+ // in case of a CompactCertTx transaction, we want to "apply" it only in validate or generate mode. This will deviate the cow's CompactCertNext depending of
+ // whether we're in validate/generate mode or not, however - given that this variable is only being used in these modes, it would be safe.
+ // The reason for making this into an exception is that during initialization time, the accounts update is "converting" the recent 320 blocks into deltas to
+ // be stored in memory. These deltas don't care about the compact certificate, and so we can improve the node load time. Additionally, it saves us from
+ // performing the validation during catchup, which is another performance boost.
+ if eval.validate || eval.generate {
+ err = balances.compactCert(tx.CertRound, tx.CertType, tx.Cert, tx.Header.FirstValid, eval.validate)
+ }
default:
err = fmt.Errorf("Unknown transaction type %v", tx.Type)
@@ -1131,7 +1152,7 @@ func (validator *evalTxValidator) run() {
// Validate: eval(ctx, l, blk, true, txcache, executionPool, true)
// AddBlock: eval(context.Background(), l, blk, false, txcache, nil, true)
// tracker: eval(context.Background(), l, blk, false, txcache, nil, false)
-func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool, usePrefetch bool) (ledgercore.StateDelta, error) {
+func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
eval, err := startEvaluator(l, blk.BlockHeader, len(blk.Payset), validate, false)
if err != nil {
return ledgercore.StateDelta{}, err
@@ -1144,17 +1165,14 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali
wg.Wait()
}()
- // If validationCtx or underlying ctx are Done, end prefetch
- if usePrefetch {
- wg.Add(1)
- go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg)
- }
-
// Next, transactions
paysetgroups, err := blk.DecodePaysetGroups()
if err != nil {
return ledgercore.StateDelta{}, err
}
+
+ paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups, blk.BlockHeader.FeeSink, blk.ConsensusProtocol())
+
var txvalidator evalTxValidator
if validate {
_, ok := config.Consensus[blk.CurrentProtocol]
@@ -1172,8 +1190,25 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali
}
- for _, txgroup := range paysetgroups {
+ base := eval.state.lookupParent.(*roundCowBase)
+
+transactionGroupLoop:
+ for {
select {
+ case txgroup, ok := <-paysetgroupsCh:
+ if !ok {
+ break transactionGroupLoop
+ } else if txgroup.err != nil {
+ return ledgercore.StateDelta{}, err
+ }
+
+ for _, br := range txgroup.balances {
+ base.accounts[br.Addr] = br.AccountData
+ }
+ err = eval.TransactionGroup(txgroup.group)
+ if err != nil {
+ return ledgercore.StateDelta{}, err
+ }
case <-ctx.Done():
return ledgercore.StateDelta{}, ctx.Err()
case err, open := <-txvalidator.done:
@@ -1181,12 +1216,6 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali
if open && err != nil {
return ledgercore.StateDelta{}, err
}
- default:
- }
-
- err = eval.TransactionGroup(txgroup)
- if err != nil {
- return ledgercore.StateDelta{}, err
}
}
@@ -1219,31 +1248,183 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali
return eval.state.deltas(), nil
}
-func prefetchThread(ctx context.Context, state roundCowParent, payset []transactions.SignedTxnInBlock, wg *sync.WaitGroup) {
- defer wg.Done()
- maybelookup := func(addr basics.Address) {
- if addr.IsZero() {
- return
+// loadedTransactionGroup is a helper struct to allow asynchronous loading of the account data needed by the transaction groups
+type loadedTransactionGroup struct {
+ // group is the transaction group
+ group []transactions.SignedTxnWithAD
+ // balances is a list of all the balances that the transaction group refers to and are needed.
+ balances []basics.BalanceRecord
+ // err indicates whether any of the balances in this structure have failed to load. In case of an error, at least
+ // one of the entries in the balances would be uninitialized.
+ err error
+}
+
+// loadAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and adds it to the first returned transaction group.
+// The order of the transaction groups returned by the channel is identical to the one in the input array.
+func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) chan loadedTransactionGroup {
+ outChan := make(chan loadedTransactionGroup, len(groups))
+ go func() {
+ // groupTask helps to organize the account loading for each transaction group.
+ type groupTask struct {
+ // balances contains the loaded balances each transaction group have
+ balances []basics.BalanceRecord
+ // balancesCount is the number of balances that need to be loaded per transaction group
+ balancesCount int
+ // done is a waiting channel for all the account data for the transaction group to be loaded
+ done chan error
}
- state.lookup(addr)
- }
- for _, stxn := range payset {
- select {
- case <-ctx.Done():
- return
- default:
+ // addrTask manages the loading of a single account address.
+ type addrTask struct {
+ // account address to fetch
+ address basics.Address
+ // a list of transaction group tasks that depends on this address
+ groups []*groupTask
+ // a list of indices into the groupTask.balances where the address would be stored
+ groupIndices []int
}
- state.lookup(stxn.Txn.Sender)
- maybelookup(stxn.Txn.Receiver)
- maybelookup(stxn.Txn.CloseRemainderTo)
- maybelookup(stxn.Txn.AssetSender)
- maybelookup(stxn.Txn.AssetReceiver)
- maybelookup(stxn.Txn.AssetCloseTo)
- maybelookup(stxn.Txn.FreezeAccount)
- for _, xa := range stxn.Txn.Accounts {
- maybelookup(xa)
+ defer close(outChan)
+
+ accountTasks := make(map[basics.Address]*addrTask)
+ maxAddressesPerTransaction := 7 + consensusParams.MaxAppTxnAccounts
+ addressesCh := make(chan *addrTask, len(groups)*consensusParams.MaxTxGroupSize*maxAddressesPerTransaction)
+ // totalBalances counts the total number of balances over all the transaction groups
+ totalBalances := 0
+
+ initAccount := func(addr basics.Address, wg *groupTask) {
+ if addr.IsZero() {
+ return
+ }
+ if task, have := accountTasks[addr]; !have {
+ task := &addrTask{
+ address: addr,
+ groups: make([]*groupTask, 1, 4),
+ groupIndices: make([]int, 1, 4),
+ }
+ task.groups[0] = wg
+ task.groupIndices[0] = wg.balancesCount
+
+ accountTasks[addr] = task
+ addressesCh <- task
+ } else {
+ task.groups = append(task.groups, wg)
+ task.groupIndices = append(task.groupIndices, wg.balancesCount)
+ }
+ wg.balancesCount++
+ totalBalances++
}
- }
+ // add the fee sink address to the accountTasks/addressesCh so that it will be loaded first.
+ if len(groups) > 0 {
+ task := &addrTask{
+ address: feeSinkAddr,
+ }
+ addressesCh <- task
+ accountTasks[feeSinkAddr] = task
+ }
+
+ // iterate over the transaction groups and add all their account addresses to the list
+ groupsReady := make([]*groupTask, len(groups))
+ for i, group := range groups {
+ task := &groupTask{}
+ groupsReady[i] = task
+ for _, stxn := range group {
+ initAccount(stxn.Txn.Sender, task)
+ initAccount(stxn.Txn.Receiver, task)
+ initAccount(stxn.Txn.CloseRemainderTo, task)
+ initAccount(stxn.Txn.AssetSender, task)
+ initAccount(stxn.Txn.AssetReceiver, task)
+ initAccount(stxn.Txn.AssetCloseTo, task)
+ initAccount(stxn.Txn.FreezeAccount, task)
+ for _, xa := range stxn.Txn.Accounts {
+ initAccount(xa, task)
+ }
+ }
+ }
+
+ // Add fee sink to the first group
+ if len(groupsReady) > 0 {
+ initAccount(feeSinkAddr, groupsReady[0])
+ }
+ close(addressesCh)
+
+ // update all the group tasks:
+ // allocate the correct number of balances, as well as
+ // enough space on the "done" channel.
+ allBalances := make([]basics.BalanceRecord, totalBalances)
+ usedBalances := 0
+ for _, gr := range groupsReady {
+ gr.balances = allBalances[usedBalances : usedBalances+gr.balancesCount]
+ gr.done = make(chan error, gr.balancesCount)
+ usedBalances += gr.balancesCount
+ }
+
+ // create a few go-routines to asynchronously load the account data.
+ for i := 0; i < asyncAccountLoadingThreadCount; i++ {
+ go func() {
+ for {
+ select {
+ case task, ok := <-addressesCh:
+ // load the address
+ if !ok {
+ // the channel got closed, which means we're done.
+ return
+ }
+ // lookup the account data directly from the ledger.
+ acctData, _, err := l.LookupWithoutRewards(rnd, task.address)
+ br := basics.BalanceRecord{
+ Addr: task.address,
+ AccountData: acctData,
+ }
+ // if there is no error..
+ if err == nil {
+ // update all the group tasks with the new acquired balance.
+ for i, wg := range task.groups {
+ wg.balances[task.groupIndices[i]] = br
+ // write a nil to indicate that we've loaded one entry.
+ wg.done <- nil
+ }
+ } else {
+ // there was an error loading that entry.
+ for _, wg := range task.groups {
+ // notify the channel of the error.
+ wg.done <- err
+ }
+ }
+ case <-ctx.Done():
+ // if the context was canceled, abort right away.
+ return
+ }
+
+ }
+ }()
+ }
+
+ // iterate on the transaction groups tasks. This array retains the original order.
+ for i, wg := range groupsReady {
+ // Wait to receive wg.balancesCount nil error messages, one for each address referenced in this txn group.
+ for j := 0; j < wg.balancesCount; j++ {
+ select {
+ case err := <-wg.done:
+ if err != nil {
+ // if there is an error, report the error to the output channel.
+ outChan <- loadedTransactionGroup{
+ group: groups[i],
+ err: err,
+ }
+ return
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+ // if we had no error, write the result to the output channel.
+ // this write will not block since we preallocated enough space on the channel.
+ outChan <- loadedTransactionGroup{
+ group: groups[i],
+ balances: wg.balances,
+ }
+ }
+ }()
+ return outChan
}
// Validate uses the ledger to validate block blk as a candidate next block.
@@ -1251,7 +1432,7 @@ func prefetchThread(ctx context.Context, state roundCowParent, payset []transact
// not a valid block (e.g., it has duplicate transactions, overspends some
// account, etc).
func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ValidatedBlock, error) {
- delta, err := eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool, true)
+ delta, err := eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool)
if err != nil {
return nil, err
}
diff --git a/ledger/eval_test.go b/ledger/eval_test.go
index 5142ffe14..1035c6ef9 100644
--- a/ledger/eval_test.go
+++ b/ledger/eval_test.go
@@ -18,6 +18,7 @@ package ledger
import (
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -31,6 +32,7 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -85,6 +87,52 @@ func TestBlockEvaluator(t *testing.T) {
err = eval.Transaction(st, transactions.ApplyData{})
require.NoError(t, err)
+ // Broken signature should fail
+ stbad := st
+ st.Sig[2] ^= 8
+ txgroup := []transactions.SignedTxn{stbad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ // Repeat should fail
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // out of range should fail
+ btxn := txn
+ btxn.FirstValid++
+ btxn.LastValid += 2
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // bogus group should fail
+ btxn = txn
+ btxn.Group[1] = 1
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // mixed fields should fail
+ btxn = txn
+ btxn.XferAsset = 3
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
+ // err = eval.Transaction(st, transactions.ApplyData{})
+ // require.Error(t, err)
+
selfTxn := transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
@@ -99,9 +147,58 @@ func TestBlockEvaluator(t *testing.T) {
Amount: basics.MicroAlgos{Raw: 100},
},
}
- err = eval.Transaction(selfTxn.Sign(keys[2]), transactions.ApplyData{})
+ stxn := selfTxn.Sign(keys[2])
+
+ // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
+ txgroup = []transactions.SignedTxn{stxn}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ err = eval.Transaction(stxn, transactions.ApplyData{})
require.NoError(t, err)
+ t3 := txn
+ t3.Amount.Raw++
+ t4 := selfTxn
+ t4.Amount.Raw++
+
+ // a group without .Group should fail
+ s3 := t3.Sign(keys[0])
+ s4 := t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // Test a group that should work
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
+ t3.Group = crypto.HashObj(group)
+ t4.Group = t3.Group
+ s3 = t3.Sign(keys[0])
+ s4 = t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ // disagreement on Group id should fail
+ t4bad := t4
+ t4bad.Group[3] ^= 3
+ s4bad := t4bad.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4bad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // missing part of the group should fail
+ txgroup = []transactions.SignedTxn{s3}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
validatedBlock, err := eval.GenerateBlock()
require.NoError(t, err)
@@ -420,6 +517,11 @@ ok:
}},
},
}
+ txgroup := []transactions.SignedTxn{stxn1, stxn2}
+ err = eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return eval, addrs[0], err
+ }
err = eval.transactionGroup(g)
return eval, addrs[0], err
}
@@ -514,6 +616,10 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool) {
for i := 0; i < numTxns; i++ {
sender := i % len(addrs)
receiver := (i + 1) % len(addrs)
+ // The following would create a more random selection of accounts, preventing half of the accounts from being cached.
+ // iDigest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24)})
+ // sender := (uint64(iDigest[0]) + uint64(iDigest[1])*256 + uint64(iDigest[2])*256*256) % uint64(len(addrs))
+ // receiver := (uint64(iDigest[4]) + uint64(iDigest[5])*256 + uint64(iDigest[6])*256*256) % uint64(len(addrs))
txn := transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
@@ -557,7 +663,7 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool) {
if withCrypto {
_, err = l2.Validate(context.Background(), validatedBlock.blk, backlogPool)
} else {
- _, err = eval(context.Background(), l2, validatedBlock.blk, false, nil, nil, true)
+ _, err = eval(context.Background(), l2, validatedBlock.blk, false, nil, nil)
}
require.NoError(b, err)
}
@@ -568,3 +674,178 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool) {
b.StopTimer()
}
+
+func TestCowCompactCert(t *testing.T) {
+ var certRnd basics.Round
+ var certType protocol.CompactCertType
+ var cert compactcert.Cert
+ var atRound basics.Round
+ var validate bool
+ accts0 := randomAccounts(20, true)
+ blocks := make(map[basics.Round]bookkeeping.BlockHeader)
+ blockErr := make(map[basics.Round]error)
+ ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr}
+ c0 := makeRoundCowState(&ml, bookkeeping.BlockHeader{}, 0, 0)
+
+ certType = protocol.CompactCertType(1234) // bad cert type
+ err := c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // no certRnd block
+ certType = protocol.CompactCertBasic
+ noBlockErr := errors.New("no block")
+ blockErr[3] = noBlockErr
+ certRnd = 3
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // no votersRnd block
+ // this is a bit of a mess; things don't quite line up with likely usage
+ validate = true
+ var certHdr bookkeeping.BlockHeader
+ certHdr.CurrentProtocol = "TestCowCompactCert"
+ certHdr.Round = 1
+ proto := config.Consensus[certHdr.CurrentProtocol]
+ proto.CompactCertRounds = 2
+ config.Consensus[certHdr.CurrentProtocol] = proto
+ blocks[certHdr.Round] = certHdr
+
+ certHdr.Round = 15
+ blocks[certHdr.Round] = certHdr
+ certRnd = certHdr.Round
+ blockErr[13] = noBlockErr
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // validate fail
+ certHdr.Round = 1
+ certRnd = certHdr.Round
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // fall through to no err
+ validate = false
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.NoError(t, err)
+
+ // 100% coverage
+}
+
+// a couple trivial tests that don't need setup
+// see TestBlockEvaluator for more
+func TestTestTransactionGroup(t *testing.T) {
+ var txgroup []transactions.SignedTxn
+ eval := BlockEvaluator{}
+ err := eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err) // nothing to do, no problem
+
+ eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
+ txgroup = make([]transactions.SignedTxn, eval.proto.MaxTxGroupSize+1)
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err) // too many
+}
+
+// test BlockEvaluator.transactionGroup()
+// some trivial checks that require no setup
+func TestPrivateTransactionGroup(t *testing.T) {
+ var txgroup []transactions.SignedTxnWithAD
+ eval := BlockEvaluator{}
+ err := eval.transactionGroup(txgroup)
+ require.NoError(t, err) // nothing to do, no problem
+
+ eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
+ txgroup = make([]transactions.SignedTxnWithAD, eval.proto.MaxTxGroupSize+1)
+ err = eval.transactionGroup(txgroup)
+ require.Error(t, err) // too many
+}
+
+// BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet.
+// This is now part of history and has to be re-created when running catchup on testnet. So, test to ensure it keeps happening.
+func TestTestnetFixup(t *testing.T) {
+ eval := &BlockEvaluator{}
+ var rewardPoolBalance basics.AccountData
+ rewardPoolBalance.MicroAlgos.Raw = 1234
+ var headerRound basics.Round
+ testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
+
+ // not a fixup round, no change
+ headerRound = 1
+ poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, rewardPoolBalance, poolOld)
+ require.NoError(t, err)
+
+ eval.genesisHash = testnetGenesisHash
+ eval.genesisHash[3]++
+
+ specialRounds := []basics.Round{1499995, 2926564}
+ for _, headerRound = range specialRounds {
+ poolOld, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, rewardPoolBalance, poolOld)
+ require.NoError(t, err)
+ }
+
+ for _, headerRound = range specialRounds {
+ testnetFixupExecution(t, headerRound, 20000000000)
+ }
+ // do all the setup and do nothing for not a special round
+ testnetFixupExecution(t, specialRounds[0]+1, 0)
+}
+
+func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uint64) {
+ testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
+ // big setup so we can move some algos
+ // boilerplate like TestBlockEvaluator, but pretend to be testnet
+ genesisInitState, addrs, keys := genesis(10)
+ genesisInitState.Block.BlockHeader.GenesisHash = testnetGenesisHash
+ genesisInitState.Block.BlockHeader.GenesisID = "testnet"
+ genesisInitState.GenesisHash = testnetGenesisHash
+
+ // for addr, adata := range genesisInitState.Accounts {
+ // t.Logf("%s: %+v", addr.String(), adata)
+ // }
+ rewardPoolBalance := genesisInitState.Accounts[testPoolAddr]
+ nextPoolBalance := rewardPoolBalance.MicroAlgos.Raw + poolBonus
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0)
+ require.NoError(t, err)
+
+ // won't work before funding the bank
+ if poolBonus > 0 {
+ _, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Error(t, err)
+ }
+
+ bankAddr, _ := basics.UnmarshalChecksumAddress("GD64YIY3TWGDMCNPP553DZPPR6LDUSFQOIJVFDPPXWEG3FVOJCCDBBHU5A")
+
+ // put some algos in the bank so that fixup can pull from this account
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: testnetGenesisHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: bankAddr,
+ Amount: basics.MicroAlgos{Raw: 20000000000 * 10},
+ },
+ }
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw)
+ require.NoError(t, err)
+}
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 1faf3a13c..2b0dbf693 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -504,8 +504,10 @@ func (l *Ledger) Latest() basics.Round {
// LatestCommitted returns the last block round number written to
// persistent storage. This block, and all previous blocks, are
-// guaranteed to be available after a crash.
-func (l *Ledger) LatestCommitted() basics.Round {
+// guaranteed to be available after a crash. In addition, it returns
+// the latest block round number added to the ledger (which will be
+// flushed to persistent storage later on).
+func (l *Ledger) LatestCommitted() (basics.Round, basics.Round) {
return l.blockQ.latestCommitted()
}
@@ -545,7 +547,7 @@ func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreem
func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
// passing nil as the executionPool is ok since we've asking the evaluator to skip verification.
- updates, err := eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil, true)
+ updates, err := eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil)
if err != nil {
return err
}
@@ -574,6 +576,7 @@ func (l *Ledger) AddValidatedBlock(vb ValidatedBlock, cert agreement.Certificate
}
l.headerCache.Put(vb.blk.Round(), vb.blk.BlockHeader)
l.trackers.newBlock(vb.blk, vb.delta)
+ l.log.Debugf("added blk %d", vb.blk.Round())
return nil
}
@@ -647,7 +650,7 @@ func (l *Ledger) trackerLog() logging.Logger {
// evaluator to shortcut the "main" ledger ( i.e. this struct ) and avoid taking the trackers lock a second time.
func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
// passing nil as the executionPool is ok since we've asking the evaluator to skip verification.
- return eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil, false)
+ return eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
}
// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go
index cea9aea43..b1b2244b4 100644
--- a/ledger/ledger_perf_test.go
+++ b/ledger/ledger_perf_test.go
@@ -319,7 +319,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
vc := verify.GetMockedCache(true)
b.ResetTimer()
for _, blk := range blocks {
- _, err = eval(context.Background(), l1, blk, true, vc, nil, true)
+ _, err = eval(context.Background(), l1, blk, true, vc, nil)
require.NoError(b, err)
err = l1.AddBlock(blk, cert)
require.NoError(b, err)
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 1080d87b1..d8745b7cf 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -19,6 +19,9 @@ package ledger
import (
"context"
"fmt"
+ "math/rand"
+ "os"
+ "runtime/pprof"
"testing"
"github.com/stretchr/testify/require"
@@ -63,7 +66,7 @@ func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Tr
}
}
-func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
+func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
params := config.Consensus[proto]
poolAddr := testPoolAddr
sinkAddr := testSinkAddr
@@ -84,7 +87,7 @@ func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (gene
for i := range genaddrs {
initKeys[genaddrs[i]] = gensecrets[i]
// Give each account quite a bit more balance than MinFee or MinBalance
- initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + 100) * 100000)})
+ initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
}
initKeys[poolAddr] = poolSecret
initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567})
@@ -221,8 +224,27 @@ func (l *Ledger) appendUnvalidatedSignedTx(t *testing.T, initAccounts map[basics
return l.appendUnvalidated(blk)
}
+func (l *Ledger) addBlockTxns(t *testing.T, accounts map[basics.Address]basics.AccountData, stxns []transactions.SignedTxn, ad transactions.ApplyData) error {
+ blk := makeNewEmptyBlock(t, l, t.Name(), accounts)
+ proto := config.Consensus[blk.CurrentProtocol]
+ for _, stx := range stxns {
+ txib, err := blk.EncodeSignedTxn(stx, ad)
+ if err != nil {
+ return fmt.Errorf("could not sign txn: %s", err.Error())
+ }
+ if proto.TxnCounter {
+ blk.TxnCounter = blk.TxnCounter + 1
+ }
+ blk.Payset = append(blk.Payset, txib)
+ }
+ var err error
+ blk.TxnRoot, err = blk.PaysetCommit()
+ require.NoError(t, err)
+ return l.AddBlock(blk, agreement.Certificate{})
+}
+
func TestLedgerBasic(t *testing.T) {
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -235,7 +257,7 @@ func TestLedgerBasic(t *testing.T) {
func TestLedgerBlockHeaders(t *testing.T) {
a := require.New(t)
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -379,7 +401,7 @@ func TestLedgerSingleTx(t *testing.T) {
// V15 is the earliest protocol version in active use.
// The genesis for betanet and testnet is at V15
// The genesis for mainnet is at V17
- genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV15)
+ genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV15, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -435,8 +457,11 @@ func TestLedgerSingleTx(t *testing.T) {
votePK[0] = 1
selPK[0] = 2
correctKeyregFields := transactions.KeyregTxnFields{
- VotePK: votePK,
- SelectionPK: selPK,
+ VotePK: votePK,
+ SelectionPK: selPK,
+ VoteKeyDilution: proto.DefaultKeyDilution,
+ VoteFirst: 0,
+ VoteLast: 10000,
}
correctKeyreg := transactions.Transaction{
@@ -577,7 +602,7 @@ func TestLedgerSingleTxV24(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName)
+ genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -746,7 +771,7 @@ func TestLedgerAppCrossRoundWrites(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName)
+ genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -882,7 +907,7 @@ func TestLedgerAppMultiTxnWrites(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName)
+ genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1047,7 +1072,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- genesisInitState, initSecrets := testGenerateInitState(t, version)
+ genesisInitState, initSecrets := testGenerateInitState(t, version, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1103,8 +1128,11 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion
votePK[0] = 1
selPK[0] = 2
correctKeyregFields := transactions.KeyregTxnFields{
- VotePK: votePK,
- SelectionPK: selPK,
+ VotePK: votePK,
+ SelectionPK: selPK,
+ VoteKeyDilution: proto.DefaultKeyDilution,
+ VoteFirst: 0,
+ VoteLast: 10000,
}
correctKeyreg := transactions.Transaction{
@@ -1302,7 +1330,7 @@ func TestLedgerRegressionFaultyLeaseFirstValidCheckFuture(t *testing.T) {
func testLedgerRegressionFaultyLeaseFirstValidCheck2f3880f7(t *testing.T, version protocol.ConsensusVersion) {
a := require.New(t)
- genesisInitState, initSecrets := testGenerateInitState(t, version)
+ genesisInitState, initSecrets := testGenerateInitState(t, version, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -1409,8 +1437,8 @@ func TestLedgerReload(t *testing.T) {
require.NoError(t, err)
// if we reloaded it before it got committed, we need to roll back the round counter.
- if l.LatestCommitted() != blk.BlockHeader.Round {
- blk.BlockHeader.Round = l.LatestCommitted()
+ if latestCommitted, _ := l.LatestCommitted(); latestCommitted != blk.BlockHeader.Round {
+ blk.BlockHeader.Round = latestCommitted
}
}
if i%13 == 0 {
@@ -1423,7 +1451,7 @@ func TestLedgerReload(t *testing.T) {
func TestGetLastCatchpointLabel(t *testing.T) {
//initLedger
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1501,7 +1529,7 @@ func TestListAssetsAndApplications(t *testing.T) {
numElementsPerSegement := 10 // This is multiplied by 10. see randomCreatables
//initLedger
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1554,3 +1582,126 @@ func TestListAssetsAndApplications(t *testing.T) {
}
require.Equal(t, appCount, len(results))
}
+
+func TestLedgerMemoryLeak(t *testing.T) {
+ t.Skip() // for manual runs only
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
+ const inMem = false
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ maxBlocks := 10000
+ nftPerAcct := make(map[basics.Address]int)
+ lastBlock, err := l.Block(l.Latest())
+ proto := config.Consensus[lastBlock.CurrentProtocol]
+ accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts)+maxBlocks)
+ keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys)+maxBlocks)
+ // regular addresses: all init accounts minus pools
+ addresses := make([]basics.Address, len(genesisInitState.Accounts)-2, len(genesisInitState.Accounts)+maxBlocks)
+ i := 0
+ for addr := range genesisInitState.Accounts {
+ if addr != testPoolAddr && addr != testSinkAddr {
+ addresses[i] = addr
+ i++
+ }
+ accounts[addr] = genesisInitState.Accounts[addr]
+ keys[addr] = initKeys[addr]
+ }
+
+ curAddressIdx := 0
+ // run for maxBlocks rounds
+ // generate 1000 txn per block
+ for i := 0; i < maxBlocks; i++ {
+ stxns := make([]transactions.SignedTxn, 1000)
+ for j := 0; j < 1000; j++ {
+ txHeader := transactions.Header{
+ Sender: addresses[curAddressIdx],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: l.Latest() + 1,
+ LastValid: l.Latest() + 10,
+ GenesisID: t.Name(),
+ GenesisHash: crypto.Hash([]byte(t.Name())),
+ }
+
+ assetCreateFields := transactions.AssetConfigTxnFields{
+ AssetParams: basics.AssetParams{
+ Total: 10000000,
+ UnitName: fmt.Sprintf("unit_%d_%d", i, j),
+ AssetName: fmt.Sprintf("asset_%d_%d", i, j),
+ },
+ }
+
+ tx := transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: txHeader,
+ AssetConfigTxnFields: assetCreateFields,
+ }
+ stxns[j] = sign(initKeys, tx)
+ nftPerAcct[addresses[curAddressIdx]]++
+
+ if nftPerAcct[addresses[curAddressIdx]] >= 990 {
+ // switch to another account
+ if curAddressIdx == len(addresses)-1 {
+ // create new account
+ var seed crypto.Seed
+ seed[1] = byte(curAddressIdx % 256)
+ seed[2] = byte((curAddressIdx >> 8) % 256)
+ seed[3] = byte((curAddressIdx >> 16) % 256)
+ seed[4] = byte((curAddressIdx >> 24) % 256)
+ x := crypto.GenerateSignatureSecrets(seed)
+ addr := basics.Address(x.SignatureVerifier)
+ sender := addresses[rand.Intn(len(genesisInitState.Accounts)-2)] // one of init accounts
+ correctTxHeader := transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: l.Latest() + 1,
+ LastValid: l.Latest() + 10,
+ GenesisID: t.Name(),
+ GenesisHash: genesisInitState.GenesisHash,
+ }
+
+ correctPayFields := transactions.PaymentTxnFields{
+ Receiver: addr,
+ Amount: basics.MicroAlgos{Raw: 1000 * 1000000},
+ }
+
+ correctPay := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: correctTxHeader,
+ PaymentTxnFields: correctPayFields,
+ }
+
+ err = l.appendUnvalidatedTx(t, accounts, keys, correctPay, transactions.ApplyData{})
+ require.NoError(t, err)
+ ad, err := l.Lookup(l.Latest(), addr)
+ require.NoError(t, err)
+
+ addresses = append(addresses, addr)
+ keys[addr] = x
+ accounts[addr] = ad
+ }
+ curAddressIdx++
+ }
+ }
+ err = l.addBlockTxns(t, genesisInitState.Accounts, stxns, transactions.ApplyData{})
+ require.NoError(t, err)
+ if i%100 == 0 {
+ l.WaitForCommit(l.Latest())
+ fmt.Printf("block: %d\n", l.Latest())
+ }
+ if i%1000 == 0 && i > 0 {
+ memprofile := fmt.Sprintf("%s-memprof-%d", t.Name(), i)
+ f, err := os.Create(memprofile)
+ require.NoError(t, err)
+ err = pprof.WriteHeapProfile(f)
+ require.NoError(t, err)
+ f.Close()
+ fmt.Printf("Profile %s created\n", memprofile)
+ }
+ }
+}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 006eaea0e..f2d2e3feb 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -93,7 +93,7 @@ type AccountDeltas struct {
// MakeStateDelta creates a new instance of StateDelta.
// hint is amount of transactions for evaluation, 2 * hint is for sender and receiver balance records.
// This does not play well for AssetConfig and ApplicationCall transactions on scale
-func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int) StateDelta {
+func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, compactCertNext basics.Round) StateDelta {
return StateDelta{
Accts: AccountDeltas{
accts: make([]basics.BalanceRecord, 0, hint*2),
@@ -106,6 +106,7 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int)
Hdr: hdr,
PrevTimestamp: prevTimestamp,
initialTransactionsCount: hint,
+ CompactCertNext: compactCertNext,
}
}
diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go
index 222c53feb..8f3b81d91 100644
--- a/ledger/ledgercore/statedelta_test.go
+++ b/ledger/ledgercore/statedelta_test.go
@@ -98,7 +98,7 @@ func BenchmarkMakeStateDelta(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- MakeStateDelta(nil, 0, hint)
+ MakeStateDelta(nil, 0, hint, 0)
}
}
diff --git a/ledger/perf_test.go b/ledger/perf_test.go
index c4aee9c2d..908a90f0a 100644
--- a/ledger/perf_test.go
+++ b/ledger/perf_test.go
@@ -47,6 +47,9 @@ func genesis(naccts int) (InitState, []basics.Address, []*crypto.SignatureSecret
keys := []*crypto.SignatureSecrets{}
accts := make(map[basics.Address]basics.AccountData)
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(naccts+2)
+
for i := 0; i < naccts; i++ {
var seed crypto.Seed
crypto.RandBytes(seed[:])
@@ -57,17 +60,17 @@ func genesis(naccts int) (InitState, []basics.Address, []*crypto.SignatureSecret
addrs = append(addrs, addr)
adata := basics.AccountData{}
- adata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 / uint64(naccts)
+ adata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 / uint64(naccts)
accts[addr] = adata
}
pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
pooldata.Status = basics.NotParticipating
accts[testPoolAddr] = pooldata
sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
sinkdata.Status = basics.NotParticipating
accts[testSinkAddr] = sinkdata
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index efef1ffd0..c314f3856 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -58,7 +58,7 @@ func TestTxTailCheckdup(t *testing.T) {
txleases := make(map[ledgercore.Txlease]basics.Round, 1)
txleases[ledgercore.Txlease{Sender: basics.Address(crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(2)})), Lease: crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(3)})}] = rnd + leasevalidity
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 1)
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 1, 0)
delta.Txids = txids
delta.Txleases = txleases
tail.newBlock(blk, delta)
diff --git a/libgoal/participation.go b/libgoal/participation.go
index 319229575..c95d4c3c6 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -47,7 +47,7 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round)
// This lambda will be used for finding the desired file.
checkIfFileIsDesiredKey := func(file os.FileInfo, expiresAfter basics.Round) (part account.Participation, err error) {
var handle db.Accessor
- var partCandidate account.Participation
+ var partCandidate account.PersistedParticipation
// If it can't be a participation key database, skip it
if !config.IsPartKeyFilename(file.Name()) {
@@ -62,14 +62,15 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round)
// Couldn't open it, skip it
return
}
- defer handle.Close()
// Fetch an account.Participation from the database
partCandidate, err = account.RestoreParticipation(handle)
if err != nil {
// Couldn't read it, skip it
+ handle.Close()
return
}
+ defer partCandidate.Close()
// Return the Participation valid for this round that relates to the passed address
// that expires farthest in the future.
@@ -77,7 +78,7 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round)
// in the short-term.
// In the future we should allow the user to specify exactly which partkeys to register.
if partCandidate.FirstValid <= round && round <= partCandidate.LastValid && partCandidate.Parent == address && partCandidate.LastValid > expiresAfter {
- part = partCandidate
+ part = partCandidate.Participation
}
return
}
@@ -164,13 +165,21 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
// Fill the database with new participation keys
newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
- return newPart, partKeyPath, err
+ part = newPart.Participation
+ newPart.Close()
+ return part, partKeyPath, err
}
// InstallParticipationKeys creates a .partkey database for a given address,
// based on an existing database from inputfile. On successful install, it
// deletes the input file.
func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) {
+ proto, ok := c.consensus[protocol.ConsensusCurrentVersion]
+ if !ok {
+ err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion)
+ return
+ }
+
// Get the GenesisID for use in the participation key path
var genID string
genID, err = c.GenesisID()
@@ -210,12 +219,7 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic
newpartkey.Store = newdb
err = newpartkey.Persist()
if err != nil {
- return
- }
-
- proto, ok := c.consensus[protocol.ConsensusCurrentVersion]
- if !ok {
- err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion)
+ newpartkey.Close()
return
}
@@ -229,11 +233,13 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic
errCh := partkey.DeleteOldKeys(basics.Round(math.MaxUint64), proto)
err = <-errCh
if err != nil {
+ newpartkey.Close()
return
}
os.Remove(inputfile)
-
- return newpartkey, newdbpath, nil
+ part = newpartkey.Participation
+ newpartkey.Close()
+ return part, newdbpath, nil
}
// ListParticipationKeys returns the available participation keys,
@@ -269,13 +275,14 @@ func (c *Client) ListParticipationKeys() (partKeyFiles map[string]account.Partic
// Fetch an account.Participation from the database
part, err := account.RestoreParticipation(handle)
- handle.Close()
if err != nil {
// Couldn't read it, skip it
+ handle.Close()
continue
}
- partKeyFiles[filename] = part
+ partKeyFiles[filename] = part.Participation
+ part.Close()
}
return
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index b35b06cd6..8726daff5 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -229,7 +229,7 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participat
parsedLastValid := basics.Round(lastValid)
parsedFee := basics.MicroAlgos{Raw: fee}
- goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes, cparams)
+ goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes)
if cparams.SupportGenesisHash {
var genHash crypto.Digest
copy(genHash[:], params.GenesisHash)
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go
index ff82bb5ad..349d3f972 100644
--- a/logging/telemetryspec/metric.go
+++ b/logging/telemetryspec/metric.go
@@ -140,6 +140,28 @@ func (m RoundTimingMetrics) Identifier() Metric {
return roundTimingMetricsIdentifier
}
+//-------------------------------------------------------
+// AccountsUpdate
+const accountsUpdateMetricsIdentifier Metric = "AccountsUpdate"
+
+// AccountsUpdateMetrics is the set of metrics captured when we process accountUpdates.commitRound
+type AccountsUpdateMetrics struct {
+ StartRound uint64
+ RoundsCount uint64
+ OldAccountPreloadDuration time.Duration
+ MerkleTrieUpdateDuration time.Duration
+ AccountsWritingDuration time.Duration
+ DatabaseCommitDuration time.Duration
+ MemoryUpdatesDuration time.Duration
+ UpdatedAccountsCount uint64
+ UpdatedCreatablesCount uint64
+}
+
+// Identifier implements the required MetricDetails interface, retrieving the Identifier for this set of metrics.
+func (m AccountsUpdateMetrics) Identifier() Metric {
+ return accountsUpdateMetricsIdentifier
+}
+
type transactionProcessingTimeDistibution struct {
// 10 buckets: 0-100Kns, 100Kns-200Kns .. 900Kns-1ms
// 9 buckets: 1ms-2ms .. 9ms-10ms
diff --git a/netdeploy/network.go b/netdeploy/network.go
index 1260697fd..3295f5116 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -22,6 +22,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "sort"
"strings"
"time"
@@ -85,10 +86,11 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor
return n, err
}
- n.cfg.RelayDirs, n.nodeDirs, n.gen, err = template.createNodeDirectories(rootDir, binDir, importKeys)
+ n.cfg.RelayDirs, n.nodeDirs, err = template.createNodeDirectories(rootDir, binDir, importKeys)
if err != nil {
return n, err
}
+ n.gen = template.Genesis
err = n.Save(rootDir)
n.SetConsensus(binDir, consensus)
@@ -146,6 +148,7 @@ func (n Network) NodeDataDirs() []string {
for _, nodeDir := range n.nodeDirs {
directories = append(directories, n.getNodeFullPath(nodeDir))
}
+ sort.Strings(directories)
return directories
}
@@ -227,7 +230,7 @@ func (n *Network) scanForNodes() error {
genesisFile := filepath.Join(n.getNodeFullPath(nodeName), genesisFileName)
fileExists := util.FileExists(genesisFile)
if fileExists {
- isPrimeDir := strings.EqualFold(nodeName, n.cfg.RelayDirs[0])
+ isPrimeDir := len(n.cfg.RelayDirs) > 0 && strings.EqualFold(nodeName, n.cfg.RelayDirs[0])
if isPrimeDir {
sawPrimeDir = true
} else {
@@ -235,7 +238,7 @@ func (n *Network) scanForNodes() error {
}
}
}
- if !sawPrimeDir {
+ if !sawPrimeDir && len(nodes) > 1 {
return fmt.Errorf("primary relay directory (%s) invalid - can't run", n.cfg.RelayDirs[0])
}
n.nodeDirs = nodes
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index 9a4ae113d..ecc7affdc 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -50,17 +50,13 @@ func (t NetworkTemplate) generateGenesisAndWallets(targetFolder, networkName, bi
genesisData := t.Genesis
genesisData.NetworkName = networkName
mergedConsensus := config.Consensus.Merge(t.Consensus)
- return gen.GenerateGenesisFiles(genesisData, mergedConsensus, targetFolder, true)
+ return gen.GenerateGenesisFiles(genesisData, mergedConsensus, targetFolder, os.Stdout)
}
// Create data folders for all NodeConfigs, configuring relays appropriately and
// returning the full path to the 'prime' relay and node folders (the first one created) and the genesis data used in this network.
-func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir string, importKeys bool) (relayDirs []string, nodeDirs map[string]string, genData gen.GenesisData, err error) {
+func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir string, importKeys bool) (relayDirs []string, nodeDirs map[string]string, err error) {
genesisFile := filepath.Join(targetFolder, genesisFileName)
- genData, err = gen.LoadGenesisData(genesisFile)
- if err != nil {
- return
- }
nodeDirs = make(map[string]string)
getGenesisVerCmd := filepath.Join(binDir, "algod")
@@ -71,6 +67,8 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
}
genesisVer = strings.TrimSpace(genesisVer)
+ relaysCount := countRelayNodes(t.Nodes)
+
for _, cfg := range t.Nodes {
nodeDir := filepath.Join(targetFolder, cfg.Name)
err = os.Mkdir(nodeDir, os.ModePerm)
@@ -138,7 +136,7 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
// Create any necessary config.json file for this node
nodeCfg := filepath.Join(nodeDir, config.ConfigFilename)
- err = createConfigFile(cfg, nodeCfg, len(t.Nodes)-1) // minus 1 to avoid counting self
+ err = createConfigFile(cfg, nodeCfg, len(t.Nodes)-1, relaysCount) // minus 1 to avoid counting self
if err != nil {
return
}
@@ -200,11 +198,7 @@ func (t NetworkTemplate) Validate() error {
// No wallet can be assigned to more than one node
// At least one relay is required
wallets := make(map[string]bool)
- relayCount := 0
for _, cfg := range t.Nodes {
- if cfg.IsRelay {
- relayCount++
- }
for _, wallet := range cfg.Wallets {
upperWallet := strings.ToUpper(wallet.Name)
if _, found := wallets[upperWallet]; found {
@@ -213,20 +207,33 @@ func (t NetworkTemplate) Validate() error {
wallets[upperWallet] = true
}
}
- if relayCount == 0 {
- return fmt.Errorf("invalid template: at least one relay is required")
- }
+ if len(t.Nodes) > 1 && countRelayNodes(t.Nodes) == 0 {
+ return fmt.Errorf("invalid template: at least one relay is required when more than a single node presents")
+ }
return nil
}
-func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes int) error {
+// countRelayNodes counts the total number of relays
+func countRelayNodes(nodeCfgs []remote.NodeConfigGoal) (relayCount int) {
+ for _, cfg := range nodeCfgs {
+ if cfg.IsRelay {
+ relayCount++
+ }
+ }
+ return
+}
+
+func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes int, relaysCount int) error {
cfg := config.GetDefaultLocal()
cfg.GossipFanout = numNodes
// Override default :8080 REST endpoint, and disable SRV lookup
cfg.EndpointAddress = "127.0.0.1:0"
cfg.DNSBootstrapID = ""
cfg.EnableProfiler = true
+ if relaysCount == 0 {
+ cfg.DisableNetworking = true
+ }
if node.IsRelay {
// Have relays listen on any localhost port
diff --git a/netdeploy/remote/bootstrappedNetwork.go b/netdeploy/remote/bootstrappedNetwork.go
new file mode 100644
index 000000000..374d85780
--- /dev/null
+++ b/netdeploy/remote/bootstrappedNetwork.go
@@ -0,0 +1,45 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package remote
+
+import (
+ "encoding/json"
+ "os"
+)
+
+// BootstrappedNetwork contains the specs for generating bootstrapped ledger database files.
+type BootstrappedNetwork struct {
+ NumRounds uint64 `json:"numRounds"`
+ RoundTransactionsCount uint64 `json:"roundTransactionsCount"`
+ GeneratedAccountsCount uint64 `json:"generatedAccountsCount"`
+ GeneratedAssetsCount uint64 `json:"generatedAssetsCount"`
+ GeneratedApplicationCount uint64 `json:"generatedApplicationCount"`
+ SourceWalletName string `json:"sourceWalletName"`
+}
+
+// LoadBootstrappedData loads a bootstrappedFile structure from a json file
+func LoadBootstrappedData(file string) (data BootstrappedNetwork, err error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+
+ dec := json.NewDecoder(f)
+ err = dec.Decode(&data)
+ return data, err
+}
diff --git a/netdeploy/remote/bootstrappedNetwork_test.go b/netdeploy/remote/bootstrappedNetwork_test.go
new file mode 100644
index 000000000..e863912db
--- /dev/null
+++ b/netdeploy/remote/bootstrappedNetwork_test.go
@@ -0,0 +1,49 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package remote
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestLoadBootstrappedData(t *testing.T) {
+ badSpecPath := filepath.Join("./../../test", "testdata/deployednettemplates/networks/bootstrapped/badSpec.json")
+ _, err := LoadBootstrappedData(badSpecPath)
+ require.NotEqual(t, nil, err)
+
+ okSpecPath := filepath.Join("./../../test", "testdata/deployednettemplates/networks/bootstrapped/okSpec.json")
+ var data BootstrappedNetwork
+ data, err = LoadBootstrappedData(okSpecPath)
+ expected := BootstrappedNetwork{
+ NumRounds: 65000,
+ RoundTransactionsCount: 1000,
+ GeneratedAccountsCount: 7000000,
+ GeneratedAssetsCount: 200000,
+ GeneratedApplicationCount: 1000000,
+ SourceWalletName: "wallet1",
+ }
+ require.Equal(t, nil, err)
+ require.Equal(t, data.NumRounds, expected.NumRounds)
+ require.Equal(t, data.RoundTransactionsCount, expected.RoundTransactionsCount)
+ require.Equal(t, data.GeneratedAccountsCount, expected.GeneratedAccountsCount)
+ require.Equal(t, data.GeneratedAssetsCount, expected.GeneratedAssetsCount)
+ require.Equal(t, data.GeneratedApplicationCount, expected.GeneratedApplicationCount)
+ require.Equal(t, data.SourceWalletName, expected.SourceWalletName)
+}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index cb78c2e7d..331a101c0 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -20,13 +20,24 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "math/rand"
"os"
"path/filepath"
"strconv"
"strings"
+ "time"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/gen"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/codecs"
)
@@ -62,13 +73,48 @@ type DeployedNetworkConfig struct {
// DeployedNetwork represents the complete configuration specification for a deployed network
type DeployedNetwork struct {
- useExistingGenesis bool
+ useExistingGenesis bool
+ createBoostrappedNetwork bool
+ GenesisData gen.GenesisData
+ Topology topology
+ Hosts []HostConfig
+ BootstrappedNet BootstrappedNetwork
+}
+
+type netState struct {
+ nAccounts uint64
+ nAssets uint64
+ nApplications uint64
+ roundTxnCnt uint64
+
+ assetPerAcct int
+ appsPerAcct int
+
+ genesisID string
+ genesisHash crypto.Digest
+ poolAddr basics.Address
+ sinkAddr basics.Address
+
+ accountsCreated bool
+ txnState protocol.TxType
- GenesisData gen.GenesisData
- Topology topology
- Hosts []HostConfig
+ round basics.Round
+ accounts []basics.Address
+ txnCount uint64
+ fundPerAccount basics.MicroAlgos
}
+const program = `#pragma version 2
+txn ApplicationID
+bz ok
+int 0
+byte "key"
+byte "value"
+app_local_put
+ok:
+int 1
+`
+
// InitDeployedNetworkConfig loads the DeployedNetworkConfig from a file
func InitDeployedNetworkConfig(file string, buildConfig BuildConfig) (cfg DeployedNetworkConfig, err error) {
processedFile, err := loadAndProcessConfig(file, buildConfig)
@@ -199,6 +245,15 @@ func (cfg *DeployedNetwork) SetUseExistingGenesisFiles(useExisting bool) bool {
return old
}
+// SetUseBoostrappedFiles sets the override flag indicating we should generate bootstrapped
+// database files (a pre-populated ledger) for the network instead of starting from an empty one.
+// Returns the previous value.
+func (cfg *DeployedNetwork) SetUseBoostrappedFiles(boostrappedFile bool) bool {
+ old := cfg.createBoostrappedNetwork
+ cfg.createBoostrappedNetwork = boostrappedFile
+ return old
+}
+
// Validate uses the specified template to deploy a new private network
// under the specified root directory.
func (cfg DeployedNetwork) Validate(buildCfg BuildConfig, rootDir string) (err error) {
@@ -268,7 +323,7 @@ func (cfg DeployedNetwork) BuildNetworkFromTemplate(buildCfg BuildConfig, rootDi
if cfg.useExistingGenesis {
fmt.Println(" *** using existing genesis files ***")
} else {
- if err = gen.GenerateGenesisFiles(cfg.GenesisData, config.Consensus, genesisFolder, true); err != nil {
+ if err = gen.GenerateGenesisFiles(cfg.GenesisData, config.Consensus, genesisFolder, os.Stdout); err != nil {
return
}
}
@@ -283,9 +338,428 @@ func (cfg DeployedNetwork) BuildNetworkFromTemplate(buildCfg BuildConfig, rootDi
return
}
+ if cfg.createBoostrappedNetwork {
+ fmt.Println("Generating db files ")
+
+ cfg.GenerateDatabaseFiles(cfg.BootstrappedNet, genesisFolder)
+ }
+
return
}
+// GenerateDatabaseFiles generates bootstrapped ledger database files according to the given configuration.
+func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, genesisFolder string) error {
+
+ accounts := make(map[basics.Address]basics.AccountData)
+
+ genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(genesisFolder, "genesis.json"))
+ if err != nil {
+ return err
+ }
+
+ var src basics.Address
+ var addr basics.Address
+ var poolAddr basics.Address
+ var sinkAddr basics.Address
+
+ srcWalletName := strings.ToLower(fileCfgs.SourceWalletName)
+
+ for _, alloc := range genesis.Allocation {
+ comment := strings.ToLower(alloc.Comment)
+ addr, err = basics.UnmarshalChecksumAddress(alloc.Address)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal '%s' address '%v' %w", alloc.Comment, alloc.Address, err)
+ }
+ switch comment {
+ case srcWalletName:
+ src = addr
+ case "feesink":
+ poolAddr = addr
+ case "rewardspool":
+ sinkAddr = addr
+ default:
+ }
+
+ accounts[addr] = alloc.State
+
+ }
+
+ //initial state
+
+ bootstrappedNet := netState{
+ nAssets: fileCfgs.GeneratedAssetsCount,
+ nApplications: fileCfgs.GeneratedApplicationCount,
+ txnState: protocol.PaymentTx,
+ roundTxnCnt: fileCfgs.RoundTransactionsCount,
+ round: basics.Round(0),
+ genesisID: genesis.ID(),
+ genesisHash: crypto.HashObj(genesis),
+ poolAddr: poolAddr,
+ sinkAddr: sinkAddr,
+ }
+
+ var params config.ConsensusParams
+ if len(genesis.Proto) == 0 {
+ params = config.Consensus[protocol.ConsensusCurrentVersion]
+ } else {
+ params = config.Consensus[genesis.Proto]
+ }
+
+ minAccounts := accountsNeeded(fileCfgs.GeneratedApplicationCount, fileCfgs.GeneratedAssetsCount, params)
+ nAccounts := fileCfgs.GeneratedAccountsCount
+ if minAccounts > nAccounts {
+ bootstrappedNet.nAccounts = minAccounts
+ } else {
+ bootstrappedNet.nAccounts = nAccounts
+ }
+
+ //fund src account with enough funding
+ bootstrappedNet.fundPerAccount = basics.MicroAlgos{Raw: uint64(bootstrappedNet.nAssets) * params.MinBalance * 2}
+ totalFunds := accounts[src].MicroAlgos.Raw + bootstrappedNet.fundPerAccount.Raw*bootstrappedNet.nAccounts + bootstrappedNet.roundTxnCnt*fileCfgs.NumRounds
+ accounts[src] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: totalFunds})
+
+ //init block
+ initState, err := generateInitState(accounts, &bootstrappedNet)
+ if err != nil {
+ return err
+ }
+ localCfg := config.GetDefaultLocal()
+ localCfg.Archival = true
+ localCfg.CatchpointTracking = -1
+ localCfg.LedgerSynchronousMode = 0
+ log := logging.NewLogger()
+ l, err := ledger.OpenLedger(log, filepath.Join(genesisFolder, "bootstrapped"), false, initState, localCfg)
+ if err != nil {
+ return err
+ }
+
+ //create accounts, apps and assets
+ prev, _ := l.Block(l.Latest())
+ err = generateAccounts(src, fileCfgs.RoundTransactionsCount, prev, l, &bootstrappedNet, params)
+ if err != nil {
+ return err
+ }
+
+ //create more transactions
+ prev, _ = l.Block(l.Latest())
+ for i := uint64(bootstrappedNet.round); i < fileCfgs.NumRounds; i++ {
+ bootstrappedNet.round++
+ blk, _ := createBlock(src, prev, fileCfgs.RoundTransactionsCount, &bootstrappedNet, params)
+ err = l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round})
+ if err != nil {
+ fmt.Printf("Error %v\n", err)
+ return err
+ }
+ prev, _ = l.Block(l.Latest())
+ }
+
+ l.WaitForCommit(bootstrappedNet.round)
+ l.Close()
+
+ localCfg.CatchpointTracking = 0
+ l, err = ledger.OpenLedger(log, genesisFolder+"/bootstrapped", false, initState, localCfg)
+ if err != nil {
+ return err
+ }
+ l.Close()
+
+ return nil
+}
+
+func getGenesisAlloc(name string, allocation []bookkeeping.GenesisAllocation) bookkeeping.GenesisAllocation {
+ name = strings.ToLower(name)
+ for _, alloc := range allocation {
+ if strings.ToLower(alloc.Comment) == name {
+ return alloc
+ }
+ }
+ return bookkeeping.GenesisAllocation{}
+}
+
+func keypair() *crypto.SignatureSecrets {
+ var seed crypto.Seed
+ crypto.RandBytes(seed[:])
+ s := crypto.GenerateSignatureSecrets(seed)
+ return s
+}
+
+func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrappedNet *netState) (ledger.InitState, error) {
+
+ var initState ledger.InitState
+
+ block := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ TimeStamp: time.Now().Unix(),
+ GenesisID: bootstrappedNet.genesisID,
+ GenesisHash: bootstrappedNet.genesisHash,
+ Round: bootstrappedNet.round,
+ RewardsState: bookkeeping.RewardsState{
+ RewardsRate: 1,
+ RewardsPool: bootstrappedNet.poolAddr,
+ FeeSink: bootstrappedNet.sinkAddr,
+ },
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ TxnCounter: 0,
+ },
+ }
+
+ initState.Block = block
+ initState.Accounts = accounts
+ initState.GenesisHash = bootstrappedNet.genesisHash
+ return initState, nil
+}
+
+func createBlock(src basics.Address, prev bookkeeping.Block, roundTxnCnt uint64, bootstrappedNet *netState, csParams config.ConsensusParams) (bookkeeping.Block, error) {
+ payset := make([]transactions.SignedTxnInBlock, 0, roundTxnCnt)
+ txibs := make([]transactions.SignedTxnInBlock, 0, roundTxnCnt)
+
+ block := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ TimeStamp: prev.TimeStamp + int64(crypto.RandUint64()%100*1000),
+ GenesisID: bootstrappedNet.genesisID,
+ GenesisHash: bootstrappedNet.genesisHash,
+ Round: bootstrappedNet.round,
+ RewardsState: bookkeeping.RewardsState{
+ RewardsRate: 1,
+ RewardsPool: prev.RewardsPool,
+ RewardsLevel: prev.RewardsLevel,
+ FeeSink: prev.FeeSink,
+ },
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: prev.CurrentProtocol,
+ },
+ TxnCounter: bootstrappedNet.txnCount,
+ },
+ }
+
+ stxns, err := createSignedTx(src, bootstrappedNet.round, csParams, bootstrappedNet)
+ if err != nil {
+ return bookkeeping.Block{}, err
+ }
+
+ for _, stxn := range stxns {
+ txib, err := block.EncodeSignedTxn(stxn, transactions.ApplyData{})
+ if err != nil {
+ return bookkeeping.Block{}, err
+ }
+ txibs = append(txibs, txib)
+ }
+
+ payset = append(payset, txibs...)
+ bootstrappedNet.txnCount += uint64(len(payset))
+ block.Payset = payset
+ block.TxnRoot, err = block.PaysetCommit()
+ if err != nil {
+ return bookkeeping.Block{}, err
+ }
+
+ return block, nil
+}
+
+func generateAccounts(src basics.Address, roundTxnCnt uint64, prev bookkeeping.Block, l *ledger.Ledger, bootstrappedNet *netState, csParams config.ConsensusParams) error {
+
+ for !bootstrappedNet.accountsCreated {
+ //create accounts
+ bootstrappedNet.round++
+ blk, _ := createBlock(src, prev, roundTxnCnt, bootstrappedNet, csParams)
+ err := l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round})
+ if err != nil {
+ fmt.Printf("Error %v\n", err)
+ return err
+ }
+
+ prev, _ = l.Block(l.Latest())
+
+ }
+
+ return nil
+}
+
+func accountsNeeded(appsCount uint64, assetCount uint64, params config.ConsensusParams) uint64 {
+ var maxApps uint64
+ var nAppAcct uint64
+
+ maxApps = uint64(params.MaxAppsCreated)
+
+ if maxApps > 0 {
+ nAppAcct = appsCount / maxApps
+ if appsCount%maxApps != 0 {
+ nAppAcct++
+ }
+ }
+
+ var maxAssets uint64
+ var nAssetAcct uint64
+ maxAssets = uint64(params.MaxAssetsPerAccount)
+
+ if maxAssets > 0 {
+ nAssetAcct = assetCount / maxAssets
+ if assetCount%maxAssets != 0 {
+ nAssetAcct++
+ }
+ }
+
+ if nAppAcct > nAssetAcct {
+ return nAppAcct
+ }
+ return nAssetAcct
+}
+
+func createSignedTx(src basics.Address, round basics.Round, params config.ConsensusParams, bootstrappedNet *netState) ([]transactions.SignedTxn, error) {
+
+ if bootstrappedNet.nApplications == 0 && bootstrappedNet.nAccounts == 0 && bootstrappedNet.nAssets == 0 {
+ bootstrappedNet.accountsCreated = true
+ }
+ var sgtxns []transactions.SignedTxn
+
+ header := transactions.Header{
+ Fee: basics.MicroAlgos{Raw: 1},
+ FirstValid: round,
+ LastValid: round,
+ GenesisID: bootstrappedNet.genesisID,
+ GenesisHash: bootstrappedNet.genesisHash,
+ }
+
+ if bootstrappedNet.txnState == protocol.PaymentTx {
+ var accounts []basics.Address
+ bootstrappedNet.appsPerAcct = 0
+ bootstrappedNet.assetPerAcct = 0
+ n := bootstrappedNet.nAccounts
+ if n == 0 || n >= bootstrappedNet.roundTxnCnt {
+ n = bootstrappedNet.roundTxnCnt
+ }
+
+ if !bootstrappedNet.accountsCreated {
+ for i := uint64(0); i < n; i++ {
+ secretDst := keypair()
+ dst := basics.Address(secretDst.SignatureVerifier)
+ accounts = append(accounts, dst)
+
+ header.Sender = src
+
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: header,
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: dst,
+ Amount: bootstrappedNet.fundPerAccount,
+ },
+ }
+ t := transactions.SignedTxn{Txn: tx}
+ sgtxns = append(sgtxns, t)
+ }
+ bootstrappedNet.nAccounts -= uint64(len(sgtxns))
+ bootstrappedNet.accounts = accounts
+ if bootstrappedNet.nAssets > 0 {
+ bootstrappedNet.txnState = protocol.AssetConfigTx
+ } else if bootstrappedNet.nApplications > 0 {
+ bootstrappedNet.txnState = protocol.ApplicationCallTx
+ }
+ } else {
+ //send payments to created accounts randomly
+ accti := rand.Intn(len(bootstrappedNet.accounts))
+ for i := uint64(0); i < n; i++ {
+ header.Sender = src
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: header,
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: bootstrappedNet.accounts[accti],
+ Amount: basics.MicroAlgos{Raw: 0},
+ },
+ }
+ t := transactions.SignedTxn{Txn: tx}
+ sgtxns = append(sgtxns, t)
+ }
+
+ }
+
+ } else if bootstrappedNet.txnState == protocol.AssetConfigTx {
+ i := uint64(0)
+ for _, acct := range bootstrappedNet.accounts {
+ if i == bootstrappedNet.nAssets {
+ break
+ }
+ header.Sender = acct
+ assetParam := basics.AssetParams{
+ Total: 100,
+ UnitName: "unit",
+ Manager: acct,
+ }
+
+ assetConfigFields := transactions.AssetConfigTxnFields{
+ AssetParams: assetParam,
+ }
+
+ tx := transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: header,
+ AssetConfigTxnFields: assetConfigFields,
+ }
+ t := transactions.SignedTxn{Txn: tx}
+ sgtxns = append(sgtxns, t)
+ i++
+ }
+ bootstrappedNet.assetPerAcct++
+ bootstrappedNet.nAssets -= uint64(len(sgtxns))
+
+ if bootstrappedNet.nAssets == 0 || bootstrappedNet.assetPerAcct == params.MaxAssetsPerAccount {
+ if bootstrappedNet.nApplications > 0 {
+ bootstrappedNet.txnState = protocol.ApplicationCallTx
+ } else {
+ bootstrappedNet.txnState = protocol.PaymentTx
+ }
+
+ }
+ } else if bootstrappedNet.txnState == protocol.ApplicationCallTx {
+ ops, err := logic.AssembleString(program)
+ if err != nil {
+ return []transactions.SignedTxn{}, err
+ }
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 2 int 1")
+ if err != nil {
+ panic(err)
+ }
+ i := uint64(0)
+ for _, acct := range bootstrappedNet.accounts {
+ if i == bootstrappedNet.nApplications {
+ break
+ }
+ header.Sender = acct
+ appCallFields := transactions.ApplicationCallTxnFields{
+ OnCompletion: transactions.NoOpOC,
+ ApplicationID: 0,
+ ClearStateProgram: ops.Program,
+ ApprovalProgram: approval,
+ ApplicationArgs: [][]byte{
+ []byte("check"),
+ []byte("bar"),
+ },
+ }
+ tx := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+
+ ApplicationCallTxnFields: appCallFields,
+ }
+
+ t := transactions.SignedTxn{Txn: tx}
+ sgtxns = append(sgtxns, t)
+ i++
+ }
+
+ bootstrappedNet.nApplications -= uint64(len(sgtxns))
+ bootstrappedNet.appsPerAcct++
+ if bootstrappedNet.nApplications == 0 || bootstrappedNet.appsPerAcct == params.MaxAppsCreated {
+ bootstrappedNet.txnState = protocol.PaymentTx
+ }
+ }
+ return sgtxns, nil
+}
+
type walletTargetData struct {
path string
partOnly bool
diff --git a/netdeploy/remote/deployedNetwork_test.go b/netdeploy/remote/deployedNetwork_test.go
new file mode 100644
index 000000000..923726219
--- /dev/null
+++ b/netdeploy/remote/deployedNetwork_test.go
@@ -0,0 +1,121 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package remote
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+func TestCreateSignedTx(t *testing.T) {
+ var networkState netState
+ networkState.nApplications = 2
+ networkState.nAssets = 2
+ networkState.nAccounts = 10
+ networkState.roundTxnCnt = 4
+ networkState.txnState = protocol.PaymentTx
+
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ secretDst := keypair()
+ src := basics.Address(secretDst.SignatureVerifier)
+
+ // create accounts
+ sgtxns, _ := createSignedTx(src, basics.Round(1), params, &networkState)
+ require.Equal(t, 4, len(sgtxns))
+ require.Equal(t, protocol.AssetConfigTx, networkState.txnState)
+ for _, sntx := range sgtxns {
+ require.Equal(t, protocol.PaymentTx, sntx.Txn.Type)
+ }
+
+ initialAccounts := networkState.accounts
+
+ // should be creating assets next
+ sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState)
+ accounts := networkState.accounts
+ require.Equal(t, 2, len(sgtxns))
+ require.Equal(t, protocol.ApplicationCallTx, networkState.txnState)
+ require.Equal(t, uint64(0), networkState.nAssets)
+ // same accounts should be used
+ require.Equal(t, initialAccounts[0], accounts[0])
+ for _, sntx := range sgtxns {
+ require.Equal(t, protocol.AssetConfigTx, sntx.Txn.Type)
+ }
+
+ // should be creating applications next
+ sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState)
+ require.Equal(t, 2, len(sgtxns))
+ require.Equal(t, protocol.PaymentTx, networkState.txnState)
+ require.Equal(t, uint64(0), networkState.nApplications)
+ require.Equal(t, initialAccounts[0], accounts[0])
+ for _, sntx := range sgtxns {
+ require.Equal(t, protocol.ApplicationCallTx, sntx.Txn.Type)
+ }
+
+ // create payment transactions for the remainder rounds
+ sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState)
+ require.Equal(t, 4, len(sgtxns))
+ require.Equal(t, protocol.PaymentTx, networkState.txnState)
+ //new accounts should be created
+ accounts = networkState.accounts
+ require.NotEqual(t, initialAccounts[0], accounts[0])
+ for _, sntx := range sgtxns {
+ require.Equal(t, protocol.PaymentTx, sntx.Txn.Type)
+ }
+
+ // assets per account should not exceed limit
+ networkState.txnState = protocol.PaymentTx
+ networkState.nAssets = 10
+ networkState.nApplications = 10
+ networkState.nAccounts = 1
+ networkState.assetPerAcct = 0
+ networkState.appsPerAcct = 0
+
+ params.MaxAssetsPerAccount = 5
+ // create 1 account and try to create 6 assets for the account
+ createSignedTx(src, basics.Round(1), params, &networkState)
+ for i := 0; i < params.MaxAssetsPerAccount; i++ {
+ createSignedTx(src, basics.Round(1), params, &networkState)
+ }
+ require.Equal(t, params.MaxAssetsPerAccount, networkState.assetPerAcct)
+ // txn state has changed to the next one
+ require.Equal(t, protocol.ApplicationCallTx, networkState.txnState)
+
+ params.MaxAppsCreated = 5
+ networkState.appsPerAcct = 0
+ // try to create 6 apps for the account
+ for i := 0; i < params.MaxAppsCreated; i++ {
+ createSignedTx(src, basics.Round(1), params, &networkState)
+ }
+ require.Equal(t, params.MaxAppsCreated, networkState.appsPerAcct)
+ // txn state has changed to the next one
+ require.Equal(t, protocol.PaymentTx, networkState.txnState)
+}
+
+func TestAccountsNeeded(t *testing.T) {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ params.MaxAppsCreated = 10
+ params.MaxAssetsPerAccount = 20
+ nAccounts := accountsNeeded(uint64(100), uint64(400), params)
+
+ require.Equal(t, uint64(20), nAccounts)
+}
diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go
index 09789cba4..acca0a592 100644
--- a/netdeploy/remote/nodecfg/nodeConfigurator.go
+++ b/netdeploy/remote/nodecfg/nodeConfigurator.go
@@ -33,12 +33,14 @@ import (
)
type nodeConfigurator struct {
- config remote.HostConfig
- dnsName string
- genesisFile string
- genesisData bookkeeping.Genesis
- relayEndpoints []srvEntry
- metricsEndpoints []srvEntry
+ config remote.HostConfig
+ dnsName string
+ genesisFile string
+ genesisData bookkeeping.Genesis
+ bootstrappedBlockFile string
+ bootstrappedTrackerFile string
+ relayEndpoints []srvEntry
+ metricsEndpoints []srvEntry
}
type srvEntry struct {
@@ -63,6 +65,19 @@ func ApplyConfigurationToHost(cfg remote.HostConfig, rootConfigDir, rootNodeDir
// Copy node directories from configuration folder to the rootNodeDir
// Then configure
func (nc *nodeConfigurator) apply(rootConfigDir, rootNodeDir string) (err error) {
+
+ blockFile := filepath.Join(rootConfigDir, "genesisdata", "bootstrapped.block.sqlite")
+ blockFileExists := util.FileExists(blockFile)
+ if blockFileExists {
+ nc.bootstrappedBlockFile = blockFile
+ }
+
+ trackerFile := filepath.Join(rootConfigDir, "genesisdata", "bootstrapped.tracker.sqlite")
+ trackerFileExists := util.FileExists(trackerFile)
+ if trackerFileExists {
+ nc.bootstrappedTrackerFile = trackerFile
+ }
+
nc.genesisFile = filepath.Join(rootConfigDir, "genesisdata", config.GenesisJSONFile)
nc.genesisData, err = bookkeeping.LoadGenesisFromFile(nc.genesisFile)
nodeDirs, err := nc.prepareNodeDirs(nc.config.Nodes, rootConfigDir, rootNodeDir)
@@ -134,6 +149,23 @@ func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootCon
return
}
+ // Copy the bootstrapped files into current ledger folder
+ if nc.bootstrappedBlockFile != "" && nc.bootstrappedTrackerFile != "" {
+ fmt.Fprintf(os.Stdout, "... copying block database file to ledger folder ...\n")
+ dest := filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.block.sqlite", config.LedgerFilenamePrefix))
+ _, err = util.CopyFile(nc.bootstrappedBlockFile, dest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", "bootstrapped.block.sqlite", filepath.Dir(nc.bootstrappedBlockFile), dest, err)
+ }
+ fmt.Fprintf(os.Stdout, "... copying tracker database file to ledger folder ...\n")
+ dest = filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.tracker.sqlite", config.LedgerFilenamePrefix))
+ _, err = util.CopyFile(nc.bootstrappedTrackerFile, dest)
+ if err != nil {
+			return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", "bootstrapped.tracker.sqlite", filepath.Dir(nc.bootstrappedTrackerFile), dest, err)
+ }
+
+ }
+
nodeDirs = append(nodeDirs, nodeDir{
NodeConfig: node,
dataDir: nodeDest,
diff --git a/network/connPerfMon.go b/network/connPerfMon.go
index 4a1355056..79a1bf564 100644
--- a/network/connPerfMon.go
+++ b/network/connPerfMon.go
@@ -233,7 +233,7 @@ func (pm *connectionPerformanceMonitor) notifyPresync(msg *IncomingMessage) {
return
}
pm.lastIncomingMsgTime = msg.Received
- // otherwise, once we recieved a message from each of the peers, move to the sync stage.
+ // otherwise, once we received a message from each of the peers, move to the sync stage.
pm.advanceStage(pmStageSync, msg.Received)
}
diff --git a/network/ping.go b/network/ping.go
index 2fafb970f..064bb5ff9 100644
--- a/network/ping.go
+++ b/network/ping.go
@@ -18,6 +18,7 @@ package network
import (
"bytes"
+ "context"
"time"
"github.com/algorand/go-algorand/crypto"
@@ -35,7 +36,7 @@ func pingHandler(message IncomingMessage) OutgoingMessage {
copy(mbytes, tbytes)
copy(mbytes[len(tbytes):], message.Data)
var digest crypto.Digest // leave blank, ping message too short
- peer.writeNonBlock(mbytes, false, digest, time.Now())
+ peer.writeNonBlock(context.Background(), mbytes, false, digest, time.Now())
return OutgoingMessage{}
}
diff --git a/network/requestTracker.go b/network/requestTracker.go
index 2978f83ba..4d2959a41 100644
--- a/network/requestTracker.go
+++ b/network/requestTracker.go
@@ -285,8 +285,11 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) {
rt.hostRequests.pruneRequests(rateLimitingWindowStartTime)
originConnections := rt.hostRequests.countOriginConnections(trackerRequest.remoteHost, rateLimitingWindowStartTime)
+ rateLimitedRemoteHost := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackerRequest.remoteHost))
+ connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0
+
// check the number of connections
- if originConnections > rt.config.ConnectionsRateLimitingCount && rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 {
+ if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && rateLimitedRemoteHost {
rt.hostRequestsMu.Unlock()
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_tcp_rate_limit"})
rt.log.With("connection", "tcp").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate")
@@ -448,7 +451,10 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.
delete(rt.httpConnections, localAddr)
}()
- if originConnections > rt.config.ConnectionsRateLimitingCount && rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 {
+ rateLimitedRemoteHost := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackedRequest.remoteHost))
+ connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0
+
+ if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && rateLimitedRemoteHost {
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_rate_limit"})
rt.log.With("connection", "http").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate")
rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
@@ -501,3 +507,13 @@ func (rt *RequestTracker) getForwardedConnectionAddress(header http.Header) (ip
}
return
}
+
+// isLocalhost returns true if the given host is a localhost address.
+func isLocalhost(host string) bool {
+ for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1", "[::]"} {
+ if host == v {
+ return true
+ }
+ }
+ return false
+}
diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go
index 0e3576d13..067841abc 100644
--- a/network/requestTracker_test.go
+++ b/network/requestTracker_test.go
@@ -75,17 +75,20 @@ func TestRateLimiting(t *testing.T) {
}
log := logging.TestingLog(t)
log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel))
+ testConfig := defaultConfig
+ // This test is conducted locally, so we want to treat all hosts the same for counting incoming requests.
+ testConfig.DisableLocalhostConnectionRateLimit = false
wn := &WebsocketNetwork{
log: log,
- config: defaultConfig,
+ config: testConfig,
phonebook: MakePhonebook(1, 1),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
}
// increase the IncomingConnectionsLimit/MaxConnectionsPerIP limits, since we don't want to test these.
- wn.config.IncomingConnectionsLimit = int(defaultConfig.ConnectionsRateLimitingCount) * 5
- wn.config.MaxConnectionsPerIP += int(defaultConfig.ConnectionsRateLimitingCount) * 5
+ wn.config.IncomingConnectionsLimit = int(testConfig.ConnectionsRateLimitingCount) * 5
+ wn.config.MaxConnectionsPerIP += int(testConfig.ConnectionsRateLimitingCount) * 5
wn.setup()
wn.eventualReadyDelay = time.Second
@@ -99,10 +102,10 @@ func TestRateLimiting(t *testing.T) {
addrA, postListen := netA.Address()
require.Truef(t, postListen, "Listening network failed to start")
- noAddressConfig := defaultConfig
+ noAddressConfig := testConfig
noAddressConfig.NetAddress = ""
- clientsCount := int(defaultConfig.ConnectionsRateLimitingCount + 5)
+ clientsCount := int(testConfig.ConnectionsRateLimitingCount + 5)
networks := make([]*WebsocketNetwork, clientsCount)
phonebooks := make([]Phonebook, clientsCount)
@@ -121,7 +124,7 @@ func TestRateLimiting(t *testing.T) {
}(networks[i], i)
}
- deadline := time.Now().Add(time.Duration(defaultConfig.ConnectionsRateLimitingWindowSeconds) * time.Second)
+ deadline := time.Now().Add(time.Duration(testConfig.ConnectionsRateLimitingWindowSeconds) * time.Second)
for i := 0; i < clientsCount; i++ {
networks[i].Start()
@@ -151,13 +154,25 @@ func TestRateLimiting(t *testing.T) {
// wait abit longer.
}
}
- if connectedClients >= int(defaultConfig.ConnectionsRateLimitingCount) {
+ if connectedClients >= int(testConfig.ConnectionsRateLimitingCount) {
timedOut = time.Now().After(deadline)
break
}
}
if !timedOut {
// test to see that at least some of the clients have seen 429
- require.Equal(t, int(defaultConfig.ConnectionsRateLimitingCount), connectedClients)
+ require.Equal(t, int(testConfig.ConnectionsRateLimitingCount), connectedClients)
}
}
+
+func TestIsLocalHost(t *testing.T) {
+ require.True(t, isLocalhost("localhost"))
+ require.True(t, isLocalhost("127.0.0.1"))
+ require.True(t, isLocalhost("[::1]"))
+ require.True(t, isLocalhost("::1"))
+ require.True(t, isLocalhost("[::]"))
+ require.False(t, isLocalhost("192.168.0.1"))
+ require.False(t, isLocalhost(""))
+ require.False(t, isLocalhost("0.0.0.0"))
+ require.False(t, isLocalhost("127.0.0.0"))
+}
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index cfb3dc8c1..c9ce6d052 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -151,7 +151,9 @@ const (
type GossipNode interface {
Address() (string, bool)
Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error
+ BroadcastArray(ctx context.Context, tag []protocol.Tag, data [][]byte, wait bool, except Peer) error
Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error
+ RelayArray(ctx context.Context, tag []protocol.Tag, data [][]byte, wait bool, except Peer) error
Disconnect(badnode Peer)
DisconnectPeers()
Ready() chan struct{}
@@ -201,6 +203,13 @@ type GossipNode interface {
// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID.
SubstituteGenesisID(rawURL string) string
+
+ // GetPeerData returns a value stored by SetPeerData
+ GetPeerData(peer Peer, key string) interface{}
+
+ // SetPeerData attaches a piece of data to a peer.
+ // Other services inside go-algorand may attach data to a peer that gets garbage collected when the peer is closed.
+ SetPeerData(peer Peer, key string, value interface{})
}
// IncomingMessage represents a message arriving from some peer in our p2p network
@@ -224,8 +233,13 @@ type IncomingMessage struct {
// Tag is a short string (2 bytes) marking a type of message
type Tag = protocol.Tag
-func highPriorityTag(tag protocol.Tag) bool {
- return tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag
+func highPriorityTag(tags []protocol.Tag) bool {
+ for _, tag := range tags {
+ if tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag {
+ return true
+ }
+ }
+ return false
}
// OutgoingMessage represents a message we want to send.
@@ -391,11 +405,12 @@ type WebsocketNetwork struct {
}
type broadcastRequest struct {
- tag Tag
- data []byte
+ tags []Tag
+ data [][]byte
except *wsPeer
done chan struct{}
enqueueTime time.Time
+ ctx context.Context
}
// Address returns a string and whether that is a 'final' address or guessed.
@@ -427,15 +442,34 @@ func (wn *WebsocketNetwork) PublicAddress() string {
// Broadcast sends a message.
// If except is not nil then we will not send it to that neighboring Peer.
// if wait is true then the call blocks until the packet has actually been sent to all neighbors.
-// TODO: add `priority` argument so that we don't have to guess it based on tag
func (wn *WebsocketNetwork) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error {
- request := broadcastRequest{tag: tag, data: data, enqueueTime: time.Now()}
+ dataArray := make([][]byte, 1, 1)
+ dataArray[0] = data
+ tagArray := make([]protocol.Tag, 1, 1)
+ tagArray[0] = tag
+ return wn.BroadcastArray(ctx, tagArray, dataArray, wait, except)
+}
+
+// BroadcastArray sends an array of messages.
+// If except is not nil then we will not send it to that neighboring Peer.
+// if wait is true then the call blocks until the packet has actually been sent to all neighbors.
+// TODO: add `priority` argument so that we don't have to guess it based on tag
+func (wn *WebsocketNetwork) BroadcastArray(ctx context.Context, tags []protocol.Tag, data [][]byte, wait bool, except Peer) error {
+ if wn.config.DisableNetworking {
+ return nil
+ }
+
+ if len(tags) != len(data) {
+ return errBcastInvalidArray
+ }
+
+ request := broadcastRequest{tags: tags, data: data, enqueueTime: time.Now(), ctx: ctx}
if except != nil {
request.except = except.(*wsPeer)
}
broadcastQueue := wn.broadcastQueueBulk
- if highPriorityTag(tag) {
+ if highPriorityTag(tags) {
broadcastQueue = wn.broadcastQueueHighPrio
}
if wait {
@@ -480,6 +514,14 @@ func (wn *WebsocketNetwork) Relay(ctx context.Context, tag protocol.Tag, data []
return nil
}
+// RelayArray relays array of messages
+func (wn *WebsocketNetwork) RelayArray(ctx context.Context, tags []protocol.Tag, data [][]byte, wait bool, except Peer) error {
+ if wn.relayMessages {
+ return wn.BroadcastArray(ctx, tags, data, wait, except)
+ }
+ return nil
+}
+
func (wn *WebsocketNetwork) disconnectThread(badnode Peer, reason disconnectReason) {
defer wn.wg.Done()
wn.disconnect(badnode, reason)
@@ -789,6 +831,7 @@ func (wn *WebsocketNetwork) innerStop() {
// Stop blocks until all activity on this node is done.
func (wn *WebsocketNetwork) Stop() {
wn.handlers.ClearHandlers([]Tag{})
+
wn.innerStop()
var listenAddr string
if wn.listener != nil {
@@ -1329,14 +1372,18 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
}
start := time.Now()
- tbytes := []byte(request.tag)
- mbytes := make([]byte, len(tbytes)+len(request.data))
- copy(mbytes, tbytes)
- copy(mbytes[len(tbytes):], request.data)
- var digest crypto.Digest
- if request.tag != protocol.MsgDigestSkipTag && len(request.data) >= messageFilterSize {
- digest = crypto.Hash(mbytes)
+ digests := make([]crypto.Digest, len(request.data), len(request.data))
+ data := make([][]byte, len(request.data), len(request.data))
+ for i, d := range request.data {
+ tbytes := []byte(request.tags[i])
+ mbytes := make([]byte, len(tbytes)+len(d))
+ copy(mbytes, tbytes)
+ copy(mbytes[len(tbytes):], d)
+ data[i] = mbytes
+ if request.tags[i] != protocol.MsgDigestSkipTag && len(d) >= messageFilterSize {
+ digests[i] = crypto.Hash(mbytes)
+ }
}
// first send to all the easy outbound peers who don't block, get them started.
@@ -1348,7 +1395,7 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
if peer == request.except {
continue
}
- ok := peer.writeNonBlock(mbytes, prio, digest, request.enqueueTime)
+ ok := peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime)
if ok {
sentMessageCount++
continue
@@ -1778,7 +1825,7 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses []
}
relaysAddresses = nil
}
- if wn.config.EnableCatchupFromArchiveServers {
+ if wn.config.EnableCatchupFromArchiveServers || wn.config.EnableBlockServiceFallbackToArchiver {
archiverAddresses, err = tools_network.ReadFromSRV("archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
if err != nil {
// only log this warning on testnet or devnet
@@ -1839,6 +1886,8 @@ var errNetworkClosing = errors.New("WebsocketNetwork shutting down")
var errBcastCallerCancel = errors.New("caller cancelled broadcast")
+var errBcastInvalidArray = errors.New("invalid broadcast array")
+
var errBcastQFull = errors.New("broadcast queue full")
// HostColonPortPattern matches "^[^:]+:\\d+$" e.g. "foo.com.:1234"
@@ -2045,7 +2094,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
resp := wn.prioScheme.MakePrioResponse(challenge)
if resp != nil {
mbytes := append([]byte(protocol.NetPrioResponseTag), resp...)
- sent := peer.writeNonBlock(mbytes, true, crypto.Digest{}, time.Now())
+ sent := peer.writeNonBlock(context.Background(), mbytes, true, crypto.Digest{}, time.Now())
if !sent {
wn.log.With("remote", addr).With("local", localAddr).Warnf("could not send priority response to %v", addr)
}
@@ -2054,6 +2103,26 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
}
}
+// GetPeerData returns the peer data associated with a particular key.
+func (wn *WebsocketNetwork) GetPeerData(peer Peer, key string) interface{} {
+ switch p := peer.(type) {
+ case *wsPeer:
+ return p.getPeerData(key)
+ default:
+ return nil
+ }
+}
+
+// SetPeerData sets the peer data associated with a particular key.
+func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{}) {
+ switch p := peer.(type) {
+ case *wsPeer:
+ p.setPeerData(key, value)
+ default:
+ return
+ }
+}
+
// NewWebsocketNetwork constructor for websockets based gossip network
func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) {
phonebook := MakePhonebook(config.ConnectionsRateLimitingCount,
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 3464ee750..a97293e4b 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -289,6 +289,158 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
}
}
+// Like a basic test, but really we just want to have SetPeerData()/GetPeerData()
+func TestWebsocketPeerData(t *testing.T) {
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.Start()
+ defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ netB := makeTestWebsocketNode(t)
+ netB.config.GossipFanout = 1
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ counter := newMessageCounter(t, 2)
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ t.Log("a ready")
+ waitReady(t, netB, readyTimeout.C)
+ t.Log("b ready")
+
+ require.Equal(t, 1, len(netA.peers))
+ require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
+ peerB := netA.peers[0]
+
+ require.Equal(t, nil, netA.GetPeerData(peerB, "not there"))
+ netA.SetPeerData(peerB, "foo", "bar")
+ require.Equal(t, "bar", netA.GetPeerData(peerB, "foo"))
+ netA.SetPeerData(peerB, "foo", "qux")
+ require.Equal(t, "qux", netA.GetPeerData(peerB, "foo"))
+ netA.SetPeerData(peerB, "foo", nil)
+ require.Equal(t, nil, netA.GetPeerData(peerB, "foo"))
+}
+
+// Test sending array of messages
+func TestWebsocketNetworkArray(t *testing.T) {
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.Start()
+ defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ netB := makeTestWebsocketNode(t)
+ netB.config.GossipFanout = 1
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ counter := newMessageCounter(t, 3)
+ counterDone := counter.done
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ t.Log("a ready")
+ waitReady(t, netB, readyTimeout.C)
+ t.Log("b ready")
+
+ tags := []protocol.Tag{protocol.TxnTag, protocol.TxnTag, protocol.TxnTag}
+ data := [][]byte{[]byte("foo"), []byte("bar"), []byte("algo")}
+ netA.BroadcastArray(context.Background(), tags, data, false, nil)
+
+ select {
+ case <-counterDone:
+ case <-time.After(2 * time.Second):
+		t.Errorf("timeout, count=%d, wanted 3", counter.count)
+ }
+}
+
+// Test cancelling message sends
+func TestWebsocketNetworkCancel(t *testing.T) {
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.Start()
+ defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ netB := makeTestWebsocketNode(t)
+ netB.config.GossipFanout = 1
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ counter := newMessageCounter(t, 100)
+ counterDone := counter.done
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ t.Log("a ready")
+ waitReady(t, netB, readyTimeout.C)
+ t.Log("b ready")
+
+ tags := make([]protocol.Tag, 100)
+ data := make([][]byte, 100)
+ for i := range data {
+ tags[i] = protocol.TxnTag
+		data[i] = []byte(fmt.Sprintf("%d", i))
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ // try calling BroadcastArray
+ netA.BroadcastArray(ctx, tags, data, true, nil)
+
+ select {
+ case <-counterDone:
+ t.Errorf("All messages were sent, send not cancelled")
+ case <-time.After(2 * time.Second):
+ }
+ assert.Equal(t, 0, counter.Count())
+
+ // try calling innerBroadcast
+ request := broadcastRequest{tags: tags, data: data, enqueueTime: time.Now(), ctx: ctx}
+ peers, _ := netA.peerSnapshot([]*wsPeer{})
+ netA.innerBroadcast(request, true, peers)
+
+ select {
+ case <-counterDone:
+ t.Errorf("All messages were sent, send not cancelled")
+ case <-time.After(2 * time.Second):
+ }
+ assert.Equal(t, 0, counter.Count())
+
+ // try calling writeLoopSend
+ msgs := make([]sendMessage, 0, len(data))
+ enqueueTime := time.Now()
+ for i, msg := range data {
+ tbytes := []byte(tags[i])
+ mbytes := make([]byte, len(tbytes)+len(msg))
+ copy(mbytes, tbytes)
+ copy(mbytes[len(tbytes):], msg)
+ msgs = append(msgs, sendMessage{data: mbytes, enqueued: time.Now(), peerEnqueued: enqueueTime, hash: crypto.Hash(mbytes), ctx: context.Background()})
+ }
+
+ msgs[50].ctx = ctx
+
+ for _, peer := range peers {
+ peer.sendBufferHighPrio <- sendMessages{msgs}
+ }
+
+ select {
+ case <-counterDone:
+ t.Errorf("All messages were sent, send not cancelled")
+ case <-time.After(2 * time.Second):
+ }
+ assert.Equal(t, 50, counter.Count())
+}
+
// Set up two nodes, test that a.Broadcast is received by B, when B has no address.
func TestWebsocketNetworkNoAddress(t *testing.T) {
netA := makeTestWebsocketNode(t)
@@ -621,8 +773,8 @@ func TestSlowOutboundPeer(t *testing.T) {
for i := range destPeers {
destPeers[i].closing = make(chan struct{})
destPeers[i].net = node
- destPeers[i].sendBufferHighPrio = make(chan sendMessage, sendBufferLength)
- destPeers[i].sendBufferBulk = make(chan sendMessage, sendBufferLength)
+ destPeers[i].sendBufferHighPrio = make(chan sendMessages, sendBufferLength)
+ destPeers[i].sendBufferBulk = make(chan sendMessages, sendBufferLength)
destPeers[i].conn = &nopConnSingleton
destPeers[i].rootURL = fmt.Sprintf("fake %d", i)
node.addPeer(&destPeers[i])
@@ -735,7 +887,7 @@ func TestDupFilter(t *testing.T) {
waitReady(t, netC, readyTimeout.C)
t.Log("c ready")
- // TODO: this test has two halves that exercise inbound de-dup and outbound non-send due to recieved hash. But it doesn't properly _test_ them as it doesn't measure _why_ it receives each message exactly once. The second half below could actualy be because of the same inbound de-dup as this first half. You can see the actions of either in metrics.
+ // TODO: this test has two halves that exercise inbound de-dup and outbound non-send due to received hash. But it doesn't properly _test_ them as it doesn't measure _why_ it receives each message exactly once. The second half below could actualy be because of the same inbound de-dup as this first half. You can see the actions of either in metrics.
// algod_network_duplicate_message_received_total{} 2
// algod_outgoing_network_message_filtered_out_total{} 2
// Maybe we should just .Set(0) those counters and use them in this test?
@@ -759,7 +911,7 @@ func TestDupFilter(t *testing.T) {
rand.Read(msg)
t.Log("A send, C non-dup-send")
netA.Broadcast(context.Background(), debugTag2, msg, true, nil)
- // B should broadcast its non-desire to recieve the message again
+ // B should broadcast its non-desire to receive the message again
time.Sleep(500 * time.Millisecond)
// C should now not send these
@@ -1143,10 +1295,14 @@ func TestWebsocketNetwork_checkServerResponseVariables(t *testing.T) {
}
func (wn *WebsocketNetwork) broadcastWithTimestamp(tag protocol.Tag, data []byte, when time.Time) error {
- request := broadcastRequest{tag: tag, data: data, enqueueTime: when}
+ msgArr := make([][]byte, 1, 1)
+ msgArr[0] = data
+ tagArr := make([]protocol.Tag, 1, 1)
+ tagArr[0] = tag
+ request := broadcastRequest{tags: tagArr, data: msgArr, enqueueTime: when, ctx: context.Background()}
broadcastQueue := wn.broadcastQueueBulk
- if highPriorityTag(tag) {
+ if highPriorityTag(tagArr) {
broadcastQueue = wn.broadcastQueueHighPrio
}
// no wait
diff --git a/network/wsPeer.go b/network/wsPeer.go
index 99a96d135..a3f248978 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -48,10 +48,15 @@ const averageMessageLength = 2 * 1024 // Most of the messages are smaller tha
const msgsInReadBufferPerPeer = 10
var networkSentBytesTotal = metrics.MakeCounter(metrics.NetworkSentBytesTotal)
+var networkSentBytesByTag = metrics.NewTagCounter("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network per message tag")
var networkReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkReceivedBytesTotal)
+var networkReceivedBytesByTag = metrics.NewTagCounter("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network per message tag")
var networkMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkMessageReceivedTotal)
+var networkMessageReceivedByTag = metrics.NewTagCounter("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network per message tag")
var networkMessageSentTotal = metrics.MakeCounter(metrics.NetworkMessageSentTotal)
+var networkMessageSentByTag = metrics.NewTagCounter("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network per message tag")
+
var networkConnectionsDroppedTotal = metrics.MakeCounter(metrics.NetworkConnectionsDroppedTotal)
var networkMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_message_sent_queue_micros_total", Description: "Total microseconds message spent waiting in queue to be sent"})
@@ -91,7 +96,9 @@ type sendMessage struct {
data []byte
enqueued time.Time // the time at which the message was first generated
peerEnqueued time.Time // the time at which the peer was attempting to enqueue the message
- msgTags map[protocol.Tag]bool // when msgTags is speficied ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data would be accompanied to this message.
+ msgTags map[protocol.Tag]bool // when msgTags is specified ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data would be accompanied to this message.
+ hash crypto.Digest
+ ctx context.Context
}
// wsPeerCore also works for non-connected peers we want to do HTTP GET from
@@ -121,6 +128,10 @@ type Response struct {
Topics Topics
}
+type sendMessages struct {
+ msgs []sendMessage
+}
+
type wsPeer struct {
// lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer.
// "successful communication" above refers to either reading from or writing to a connection without receiving any
@@ -132,6 +143,9 @@ type wsPeer struct {
// peer, or zero if no message is being written.
intermittentOutgoingMessageEnqueueTime int64
+ // Nonce used to uniquely identify requests
+ requestNonce uint64
+
wsPeerCore
// conn will be *websocket.Conn (except in testing)
@@ -142,8 +156,8 @@ type wsPeer struct {
closing chan struct{}
- sendBufferHighPrio chan sendMessage
- sendBufferBulk chan sendMessage
+ sendBufferHighPrio chan sendMessages
+ sendBufferBulk chan sendMessages
wg sync.WaitGroup
@@ -180,9 +194,6 @@ type wsPeer struct {
// peer version ( this is one of the version supported by the current node and listed in SupportedProtocolVersions )
version string
- // Nonce used to uniquely identify requests
- requestNonce uint64
-
// responseChannels used by the client to wait on the response of the request
responseChannels map[uint64]chan *Response
@@ -204,6 +215,13 @@ type wsPeer struct {
// throttledOutgoingConnection determines if this outgoing connection will be throttled bassed on it's
// performance or not. Throttled connections are more likely to be short-lived connections.
throttledOutgoingConnection bool
+
+ // clientDataStore is a generic key/value store used to store client-side data entries associated with a particular peer.
+ // Locked by clientDataStoreMu.
+ clientDataStore map[string]interface{}
+
+ // clientDataStoreMu synchronizes access to clientDataStore
+ clientDataStoreMu deadlock.Mutex
}
// HTTPPeer is what the opaque Peer might be.
@@ -266,7 +284,7 @@ func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) err
digest = crypto.Hash(mbytes)
}
- ok := wp.writeNonBlock(mbytes, false, digest, time.Now())
+ ok := wp.writeNonBlock(ctx, mbytes, false, digest, time.Now())
if !ok {
networkBroadcastsDropped.Inc(nil)
err = fmt.Errorf("wsPeer failed to unicast: %v", wp.GetAddress())
@@ -290,11 +308,16 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseT
serializedMsg := responseTopics.MarshallTopics()
// Send serializedMsg
- select {
- case wp.sendBufferBulk <- sendMessage{
+ msg := make([]sendMessage, 1, 1)
+ msg[0] = sendMessage{
data: append([]byte(protocol.TopicMsgRespTag), serializedMsg...),
enqueued: time.Now(),
- peerEnqueued: time.Now()}:
+ peerEnqueued: time.Now(),
+ ctx: context.Background(),
+ }
+
+ select {
+ case wp.sendBufferBulk <- sendMessages{msgs: msg}:
case <-wp.closing:
wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
return
@@ -308,11 +331,12 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseT
func (wp *wsPeer) init(config config.Local, sendBufferLength int) {
wp.net.log.Debugf("wsPeer init outgoing=%v %#v", wp.outgoing, wp.rootURL)
wp.closing = make(chan struct{})
- wp.sendBufferHighPrio = make(chan sendMessage, sendBufferLength)
- wp.sendBufferBulk = make(chan sendMessage, sendBufferLength)
+ wp.sendBufferHighPrio = make(chan sendMessages, sendBufferLength)
+ wp.sendBufferBulk = make(chan sendMessages, sendBufferLength)
atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano())
wp.responseChannels = make(map[uint64]chan *Response)
wp.sendMessageTag = defaultSendMessageTags
+ wp.clientDataStore = make(map[string]interface{})
// processed is a channel that messageHandlerThread writes to
// when it's done with one of our messages, so that we can queue
@@ -401,6 +425,8 @@ func (wp *wsPeer) readLoop() {
atomic.StoreInt64(&wp.lastPacketTime, msg.Received)
networkReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil)
networkMessageReceivedTotal.AddUint64(1, nil)
+ networkReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2))
+ networkMessageReceivedByTag.Add(string(tag[:]), 1)
msg.Sender = wp
// for outgoing connections, we want to notify the connection monitor that we've received
@@ -483,12 +509,15 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) {
wp.net.log.Warnf("wsPeer handleMessageOfInterest: could not unmarshall message from: %s %v", wp.conn.RemoteAddr().String(), err)
return
}
- sm := sendMessage{
+ msgs := make([]sendMessage, 1, 1)
+ msgs[0] = sendMessage{
data: nil,
enqueued: time.Now(),
peerEnqueued: time.Now(),
msgTags: msgTagsMap,
+ ctx: context.Background(),
}
+ sm := sendMessages{msgs: msgs}
// try to send the message to the send loop. The send loop will store the message locally and would use it.
// the rationale here is that this message is rarely sent, and we would benefit from having it being lock-free.
@@ -531,7 +560,24 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) {
wp.outgoingMsgFilter.CheckDigest(digest, true, true)
}
-func (wp *wsPeer) writeLoopSend(msg sendMessage) disconnectReason {
+func (wp *wsPeer) writeLoopSend(msgs sendMessages) disconnectReason {
+ for _, msg := range msgs.msgs {
+ select {
+ case <-msg.ctx.Done():
+ //logging.Base().Infof("cancelled large send, msg %v out of %v", i, len(msgs.msgs))
+ return disconnectReasonNone
+ default:
+ }
+
+ if err := wp.writeLoopSendMsg(msg); err != disconnectReasonNone {
+ return err
+ }
+ }
+
+ return disconnectReasonNone
+}
+
+func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
if len(msg.data) > maxMessageLength {
wp.net.log.Errorf("trying to send a message longer than we would recieve: %d > %d tag=%s", len(msg.data), maxMessageLength, string(msg.data[0:2]))
// just drop it, don't break the connection
@@ -569,7 +615,9 @@ func (wp *wsPeer) writeLoopSend(msg sendMessage) disconnectReason {
}
atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano())
networkSentBytesTotal.AddUint64(uint64(len(msg.data)), nil)
+ networkSentBytesByTag.Add(string(tag), uint64(len(msg.data)))
networkMessageSentTotal.AddUint64(1, nil)
+ networkMessageSentByTag.Add(string(tag), 1)
networkMessageQueueMicrosTotal.AddUint64(uint64(time.Now().Sub(msg.peerEnqueued).Nanoseconds()/1000), nil)
return disconnectReasonNone
}
@@ -613,24 +661,47 @@ func (wp *wsPeer) writeLoopCleanup(reason disconnectReason) {
wp.wg.Done()
}
+func (wp *wsPeer) writeNonBlock(ctx context.Context, data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time) bool {
+ msgs := make([][]byte, 1, 1)
+ digests := make([]crypto.Digest, 1, 1)
+ msgs[0] = data
+ digests[0] = digest
+ return wp.writeNonBlockMsgs(ctx, msgs, highPrio, digests, msgEnqueueTime)
+}
+
// return true if enqueued/sent
-func (wp *wsPeer) writeNonBlock(data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time) bool {
- if wp.outgoingMsgFilter != nil && len(data) > messageFilterSize && wp.outgoingMsgFilter.CheckDigest(digest, false, false) {
- //wp.net.log.Debugf("msg drop as outbound dup %s(%d) %v", string(data[:2]), len(data)-2, digest)
- // peer has notified us it doesn't need this message
- outgoingNetworkMessageFilteredOutTotal.Inc(nil)
- outgoingNetworkMessageFilteredOutBytesTotal.AddUint64(uint64(len(data)), nil)
+func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio bool, digest []crypto.Digest, msgEnqueueTime time.Time) bool {
+ includeIndices := make([]int, 0, len(data))
+ for i := range data {
+ if wp.outgoingMsgFilter != nil && len(data[i]) > messageFilterSize && wp.outgoingMsgFilter.CheckDigest(digest[i], false, false) {
+ //wp.net.log.Debugf("msg drop as outbound dup %s(%d) %v", string(data[:2]), len(data)-2, digest)
+ // peer has notified us it doesn't need this message
+ outgoingNetworkMessageFilteredOutTotal.Inc(nil)
+ outgoingNetworkMessageFilteredOutBytesTotal.AddUint64(uint64(len(data)), nil)
+ } else {
+ includeIndices = append(includeIndices, i)
+ }
+ }
+ if len(includeIndices) == 0 {
// returning true because it is as good as sent, the peer already has it.
return true
}
- var outchan chan sendMessage
+
+ var outchan chan sendMessages
+
+ msgs := make([]sendMessage, 0, len(includeIndices))
+ enqueueTime := time.Now()
+ for _, index := range includeIndices {
+ msgs = append(msgs, sendMessage{data: data[index], enqueued: msgEnqueueTime, peerEnqueued: enqueueTime, hash: digest[index], ctx: ctx})
+ }
+
if highPrio {
outchan = wp.sendBufferHighPrio
} else {
outchan = wp.sendBufferBulk
}
select {
- case outchan <- sendMessage{data: data, enqueued: msgEnqueueTime, peerEnqueued: time.Now()}:
+ case outchan <- sendMessages{msgs: msgs}:
return true
default:
}
@@ -655,7 +726,7 @@ func (wp *wsPeer) sendPing() bool {
copy(mbytes, tagBytes)
crypto.RandBytes(mbytes[len(tagBytes):])
wp.pingData = mbytes[len(tagBytes):]
- sent := wp.writeNonBlock(mbytes, false, crypto.Digest{}, time.Now())
+ sent := wp.writeNonBlock(context.Background(), mbytes, false, crypto.Digest{}, time.Now())
if sent {
wp.pingInFlight = true
@@ -742,11 +813,14 @@ func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Re
defer wp.getAndRemoveResponseChannel(hash)
// Send serializedMsg
- select {
- case wp.sendBufferBulk <- sendMessage{
+ msg := make([]sendMessage, 1, 1)
+ msg[0] = sendMessage{
data: append([]byte(tag), serializedMsg...),
enqueued: time.Now(),
- peerEnqueued: time.Now()}:
+ peerEnqueued: time.Now(),
+ ctx: context.Background()}
+ select {
+ case wp.sendBufferBulk <- sendMessages{msgs: msg}:
case <-wp.closing:
e = fmt.Errorf("peer closing %s", wp.conn.RemoteAddr().String())
return
@@ -780,6 +854,21 @@ func (wp *wsPeer) getAndRemoveResponseChannel(key uint64) (respChan chan *Respon
defer wp.responseChannelsMutex.Unlock()
respChan, found = wp.responseChannels[key]
delete(wp.responseChannels, key)
-
return
}
+
+func (wp *wsPeer) getPeerData(key string) interface{} {
+ wp.clientDataStoreMu.Lock()
+ defer wp.clientDataStoreMu.Unlock()
+ return wp.clientDataStore[key]
+}
+
+func (wp *wsPeer) setPeerData(key string, value interface{}) {
+ wp.clientDataStoreMu.Lock()
+ defer wp.clientDataStoreMu.Unlock()
+ if value == nil {
+ delete(wp.clientDataStore, key)
+ } else {
+ wp.clientDataStore[key] = value
+ }
+}
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index b4165a41c..fabfe21b3 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -20,6 +20,7 @@ import (
"encoding/binary"
"testing"
"time"
+ "unsafe"
"github.com/stretchr/testify/require"
)
@@ -77,3 +78,13 @@ func TestDefaultMessageTagsLength(t *testing.T) {
require.Equal(t, 2, len(tag))
}
}
+
+// TestAtomicVariablesAligment ensures that the 64-bit atomic variables
+// offsets are 64-bit aligned. This is required due to go atomic library
+// limitation.
+func TestAtomicVariablesAligment(t *testing.T) {
+ p := wsPeer{}
+ require.True(t, (unsafe.Offsetof(p.requestNonce)%8) == 0)
+ require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0)
+ require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0)
+}
diff --git a/node/netprio.go b/node/netprio.go
index 4df0dcc91..c65db60d4 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -78,12 +78,7 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte {
// Use the participation key for 2 rounds in the future, so that
// it's unlikely to be deleted from underneath of us.
voteRound := latest + 2
- for _, part := range node.accountManager.Keys() {
- firstValid, lastValid := part.ValidInterval()
- if voteRound < firstValid || voteRound > lastValid {
- continue
- }
-
+ for _, part := range node.accountManager.Keys(voteRound) {
parent := part.Address()
data, err := node.ledger.Lookup(latest, parent)
if err != nil {
diff --git a/node/node.go b/node/node.go
index 29fff9316..ebd4ebb6b 100644
--- a/node/node.go
+++ b/node/node.go
@@ -55,8 +55,6 @@ import (
"github.com/algorand/go-deadlock"
)
-const participationKeyCheckSecs = 60
-
// StatusReport represents the current basic status of the node
type StatusReport struct {
LastRound basics.Round
@@ -227,7 +225,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
}
}
- node.blockService = rpcs.MakeBlockService(cfg, node.ledger, p2pNode, node.genesisID)
+ node.blockService = rpcs.MakeBlockService(node.log, cfg, node.ledger, p2pNode, node.genesisID)
node.ledgerService = rpcs.MakeLedgerService(cfg, node.ledger, p2pNode, node.genesisID)
rpcs.RegisterTxService(node.transactionPool, p2pNode, node.genesisID, cfg.TxPoolSize, cfg.TxSyncServeResponseSize)
@@ -250,14 +248,14 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
Ledger: agreementLedger,
BlockFactory: node,
BlockValidator: blockValidator,
- KeyManager: node.accountManager,
+ KeyManager: node,
RandomSource: node,
BacklogPool: node.highPriorityCryptoVerificationPool,
}
node.agreementService = agreement.MakeService(agreementParameters)
node.catchupBlockAuth = blockAuthenticatorImpl{Ledger: node.ledger, AsyncVoteVerifier: agreement.MakeAsyncVoteVerifier(node.lowPriorityCryptoVerificationPool)}
- node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates)
+ node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates, node.lowPriorityCryptoVerificationPool)
node.txPoolSyncerService = rpcs.MakeTxSyncer(node.transactionPool, node.net, node.txHandler.SolicitedTxHandler(), time.Duration(cfg.TxSyncIntervalSeconds)*time.Second, time.Duration(cfg.TxSyncTimeoutSeconds)*time.Second, cfg.TxSyncServeResponseSize)
err = node.loadParticipationKeys()
@@ -342,8 +340,10 @@ func (node *AlgorandFullNode) Start() {
// Set up a context we can use to cancel goroutines on Stop()
node.ctx, node.cancelCtx = context.WithCancel(context.Background())
- // start accepting connections
- node.net.Start()
+ if !node.config.DisableNetworking {
+ // start accepting connections
+ node.net.Start()
+ }
node.config.NetAddress, _ = node.net.Address()
if node.catchpointCatchupService != nil {
@@ -417,7 +417,9 @@ func (node *AlgorandFullNode) Stop() {
}()
node.net.ClearHandlers()
- node.net.Stop()
+ if !node.config.DisableNetworking {
+ node.net.Stop()
+ }
if node.catchpointCatchupService != nil {
node.catchpointCatchupService.Stop()
} else {
@@ -700,7 +702,7 @@ func (node *AlgorandFullNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn
// Reload participation keys from disk periodically
func (node *AlgorandFullNode) checkForParticipationKeys() {
defer node.monitoringRoutinesWaitGroup.Done()
- ticker := time.NewTicker(participationKeyCheckSecs * time.Second)
+ ticker := time.NewTicker(node.config.ParticipationKeysRefreshInterval)
for {
select {
case <-ticker.C:
@@ -734,6 +736,13 @@ func (node *AlgorandFullNode) loadParticipationKeys() error {
// Fetch a handle to this database
handle, err := node.getExistingPartHandle(filename)
if err != nil {
+ if db.IsErrBusy(err) {
+ // this is a special case:
+			// we might get "database is locked" when we attempt to access a database that is concurrently updating its participation keys.
+ // that database is clearly already on the account manager, and doesn't need to be processed through this logic, and therefore
+ // we can safely ignore that fail case.
+ continue
+ }
return fmt.Errorf("AlgorandFullNode.loadParticipationKeys: cannot load db %v: %v", filename, err)
}
@@ -1053,3 +1062,54 @@ func (node *AlgorandFullNode) AssembleBlock(round basics.Round, deadline time.Ti
}
return validatedBlock{vb: lvb}, nil
}
+
+// VotingKeys implements the key manager's VotingKeys method, and provides additional validation with the ledger
+// that allows us to load multiple overlapping keys for the same account, filtering them on a per-round basis.
+func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []account.Participation {
+ keys := node.accountManager.Keys(votingRound)
+
+ participations := make([]account.Participation, 0, len(keys))
+ accountsData := make(map[basics.Address]basics.AccountData, len(keys))
+ matchingAccountsKeys := make(map[basics.Address]bool)
+ mismatchingAccountsKeys := make(map[basics.Address]int)
+ const bitMismatchingVotingKey = 1
+ const bitMismatchingSelectionKey = 2
+ for _, part := range keys {
+ acctData, hasAccountData := accountsData[part.Parent]
+ if !hasAccountData {
+ var err error
+ acctData, _, err = node.ledger.LookupWithoutRewards(keysRound, part.Parent)
+ if err != nil {
+ node.log.Warnf("node.VotingKeys: Account %v not participating: cannot locate account for round %d : %v", part.Address(), keysRound, err)
+ continue
+ }
+ accountsData[part.Parent] = acctData
+ }
+
+ if acctData.VoteID != part.Voting.OneTimeSignatureVerifier {
+ mismatchingAccountsKeys[part.Address()] = mismatchingAccountsKeys[part.Address()] | bitMismatchingVotingKey
+ continue
+ }
+ if acctData.SelectionID != part.VRF.PK {
+ mismatchingAccountsKeys[part.Address()] = mismatchingAccountsKeys[part.Address()] | bitMismatchingSelectionKey
+ continue
+ }
+ participations = append(participations, part)
+ matchingAccountsKeys[part.Address()] = true
+ }
+ // write the warnings per account only if we couldn't find a single valid key for that account.
+ for mismatchingAddr, warningFlags := range mismatchingAccountsKeys {
+ if matchingAccountsKeys[mismatchingAddr] {
+ continue
+ }
+ if warningFlags&bitMismatchingVotingKey == bitMismatchingVotingKey {
+ node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain voting key differ from participation voting key for round %d", mismatchingAddr, votingRound, keysRound)
+ continue
+ }
+ if warningFlags&bitMismatchingSelectionKey == bitMismatchingSelectionKey {
+ node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain selection key differ from participation selection key for round %d", mismatchingAddr, votingRound, keysRound)
+ continue
+ }
+ }
+ return participations
+}
diff --git a/nodecontrol/NodeController.go b/nodecontrol/NodeController.go
index 202772f70..ab8f84c41 100644
--- a/nodecontrol/NodeController.go
+++ b/nodecontrol/NodeController.go
@@ -17,7 +17,6 @@
package nodecontrol
import (
- "os"
"path/filepath"
"syscall"
"time"
@@ -112,26 +111,26 @@ func (nc NodeController) stopProcesses() (kmdAlreadyStopped bool, err error) {
return
}
-func killPID(pid int) error {
- process, err := os.FindProcess(pid)
+func killPID(pid int) (killed bool, err error) {
+ process, err := util.FindProcess(pid)
if process == nil || err != nil {
- return err
+ return false, err
}
err = util.KillProcess(pid, syscall.SIGTERM)
if err != nil {
- return err
+ return false, err
}
waitLong := time.After(time.Second * 30)
for {
// Send null signal - if process still exists, it'll return nil
// So when we get an error, assume it's gone.
if err = process.Signal(syscall.Signal(0)); err != nil {
- return nil
+ return false, nil
}
select {
case <-waitLong:
- return util.KillProcess(pid, syscall.SIGKILL)
+ return true, util.KillProcess(pid, syscall.SIGKILL)
case <-time.After(time.Millisecond * 100):
}
}
diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go
index a0fde4f58..6ba5413bb 100644
--- a/nodecontrol/algodControl.go
+++ b/nodecontrol/algodControl.go
@@ -160,9 +160,15 @@ func (nc *NodeController) StopAlgod() (err error) {
algodPID, err := nc.GetAlgodPID()
if err == nil {
// Kill algod by PID
- err = killPID(int(algodPID))
- if err != nil {
- return
+ killed, killErr := killPID(int(algodPID))
+ if killErr != nil {
+ return killErr
+ }
+ // if we ended up killing the process, make sure to delete the pid file to avoid
+ // potential downstream issues.
+ if killed {
+ // delete the pid file.
+ os.Remove(nc.algodPidFile)
}
} else {
return &NodeNotRunningError{algodDataDir: nc.algodDataDir}
diff --git a/nodecontrol/kmdControl.go b/nodecontrol/kmdControl.go
index b23e34d17..08fd8a2c3 100644
--- a/nodecontrol/kmdControl.go
+++ b/nodecontrol/kmdControl.go
@@ -121,9 +121,15 @@ func (kc *KMDController) StopKMD() (alreadyStopped bool, err error) {
kmdPID, err := kc.GetKMDPID()
if err == nil {
// Kill kmd by PID
- err = killPID(int(kmdPID))
- if err != nil {
- return
+ killed, killErr := killPID(int(kmdPID))
+ if killErr != nil {
+ return false, killErr
+ }
+ // if we ended up killing the process, make sure to delete the pid file to avoid
+ // potential downstream issues.
+ if killed {
+ // delete the pid file.
+ os.Remove(kc.kmdPIDPath)
}
} else {
err = nil
@@ -200,8 +206,7 @@ func (kc *KMDController) StartKMD(args KMDStartArgs) (alreadyRunning bool, err e
logging.Base().Errorf("%s: kmd data dir exists but is not a directory", kc.kmdDataDir)
return false, errors.New("bad kmd data dir")
}
- if (dataDirStat.Mode() & 0077) != 0 {
- logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777, DefaultKMDDataDirPerms)
+ if !kc.isDirectorySafe(dataDirStat) {
return false, errors.New("kmd data dir not secure")
}
} else {
diff --git a/nodecontrol/kmdControl_common.go b/nodecontrol/kmdControl_common.go
new file mode 100644
index 000000000..947ccd80a
--- /dev/null
+++ b/nodecontrol/kmdControl_common.go
@@ -0,0 +1,33 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// +build !windows
+
+package nodecontrol
+
+import (
+ "os"
+
+ "github.com/algorand/go-algorand/logging"
+)
+
+func (kc *KMDController) isDirectorySafe(dirStats os.FileInfo) bool {
+ if (dirStats.Mode() & 0077) != 0 {
+ logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dirStats.Mode()&0777, DefaultKMDDataDirPerms)
+ return false
+ }
+ return true
+}
diff --git a/nodecontrol/kmdControl_windows.go b/nodecontrol/kmdControl_windows.go
new file mode 100644
index 000000000..601cba07f
--- /dev/null
+++ b/nodecontrol/kmdControl_windows.go
@@ -0,0 +1,25 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package nodecontrol
+
+import (
+ "os"
+)
+
+func (kc *KMDController) isDirectorySafe(_ os.FileInfo) bool {
+ return true
+}
diff --git a/rpcs/blockService.go b/rpcs/blockService.go
index fec1fe402..677446a7f 100644
--- a/rpcs/blockService.go
+++ b/rpcs/blockService.go
@@ -20,7 +20,9 @@ import (
"context"
"encoding/binary"
"net/http"
+ "path"
"strconv"
+ "strings"
"github.com/gorilla/mux"
@@ -28,6 +30,7 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -66,6 +69,9 @@ type BlockService struct {
net network.GossipNode
enableService bool
enableServiceOverGossip bool
+ fallbackEndpoints fallbackEndpoints
+ enableArchiverFallback bool
+ log logging.Logger
}
// EncodedBlockCert defines how GetBlockBytes encodes a block and its certificate
@@ -84,8 +90,13 @@ type PreEncodedBlockCert struct {
Certificate codec.Raw `codec:"cert"`
}
+type fallbackEndpoints struct {
+ endpoints []string
+ lastUsed int
+}
+
// MakeBlockService creates a BlockService around the provider Ledger and registers it for HTTP callback on the block serving path
-func MakeBlockService(config config.Local, ledger *data.Ledger, net network.GossipNode, genesisID string) *BlockService {
+func MakeBlockService(log logging.Logger, config config.Local, ledger *data.Ledger, net network.GossipNode, genesisID string) *BlockService {
service := &BlockService{
ledger: ledger,
genesisID: genesisID,
@@ -93,6 +104,9 @@ func MakeBlockService(config config.Local, ledger *data.Ledger, net network.Goss
net: net,
enableService: config.EnableBlockService,
enableServiceOverGossip: config.EnableGossipBlockService,
+ fallbackEndpoints: makeFallbackEndpoints(log, config.BlockServiceCustomFallbackEndpoints),
+ enableArchiverFallback: config.EnableBlockServiceFallbackToArchiver,
+ log: log,
}
if service.enableService {
net.RegisterHTTPHandler(BlockServiceBlockPath, service)
@@ -129,19 +143,19 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
genesisID, hasGenesisID := pathVars["genesisID"]
if hasVersionStr {
if versionStr != "1" {
- logging.Base().Debug("http block bad version", versionStr)
+ bs.log.Debug("http block bad version", versionStr)
response.WriteHeader(http.StatusBadRequest)
return
}
}
if hasGenesisID {
if bs.genesisID != genesisID {
- logging.Base().Debugf("http block bad genesisID mine=%#v theirs=%#v", bs.genesisID, genesisID)
+ bs.log.Debugf("http block bad genesisID mine=%#v theirs=%#v", bs.genesisID, genesisID)
response.WriteHeader(http.StatusBadRequest)
return
}
} else {
- logging.Base().Debug("http block no genesisID")
+ bs.log.Debug("http block no genesisID")
response.WriteHeader(http.StatusBadRequest)
return
}
@@ -150,13 +164,13 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
request.Body = http.MaxBytesReader(response, request.Body, blockServerMaxBodyLength)
err := request.ParseForm()
if err != nil {
- logging.Base().Debug("http block parse form err", err)
+ bs.log.Debug("http block parse form err", err)
response.WriteHeader(http.StatusBadRequest)
return
}
roundStrs, ok := request.Form["b"]
if !ok || len(roundStrs) != 1 {
- logging.Base().Debug("http block bad block id form arg")
+ bs.log.Debug("http block bad block id form arg")
response.WriteHeader(http.StatusBadRequest)
return
}
@@ -165,12 +179,12 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
if ok {
if len(versionStrs) == 1 {
if versionStrs[0] != "1" {
- logging.Base().Debug("http block bad version", versionStr)
+ bs.log.Debug("http block bad version", versionStr)
response.WriteHeader(http.StatusBadRequest)
return
}
} else {
- logging.Base().Debug("http block wrong number of v args", len(versionStrs))
+ bs.log.Debug("http block wrong number of v args", len(versionStrs))
response.WriteHeader(http.StatusBadRequest)
return
}
@@ -180,7 +194,7 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
}
round, err := strconv.ParseUint(roundStr, 36, 64)
if err != nil {
- logging.Base().Debug("http block round parse fail", roundStr, err)
+ bs.log.Debug("http block round parse fail", roundStr, err)
response.WriteHeader(http.StatusBadRequest)
return
}
@@ -189,12 +203,15 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
switch err.(type) {
case ledgercore.ErrNoEntry:
// entry cound not be found.
- response.Header().Set("Cache-Control", blockResponseMissingBlockCacheControl)
- response.WriteHeader(http.StatusNotFound)
+ ok := bs.redirectRequest(round, response, request)
+ if !ok {
+ response.Header().Set("Cache-Control", blockResponseMissingBlockCacheControl)
+ response.WriteHeader(http.StatusNotFound)
+ }
return
default:
// unexpected error.
- logging.Base().Warnf("ServeHTTP : failed to retrieve block %d %v", round, err)
+ bs.log.Warnf("ServeHTTP : failed to retrieve block %d %v", round, err)
response.WriteHeader(http.StatusInternalServerError)
return
}
@@ -206,7 +223,7 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
response.WriteHeader(http.StatusOK)
_, err = response.Write(encodedBlockCert)
if err != nil {
- logging.Base().Warn("http block write failed ", err)
+ bs.log.Warn("http block write failed ", err)
}
}
@@ -251,14 +268,14 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
topics, err := network.UnmarshallTopics(reqMsg.Data)
if err != nil {
- logging.Base().Infof("BlockService handleCatchupReq: %s", err.Error())
+ bs.log.Infof("BlockService handleCatchupReq: %s", err.Error())
respTopics = network.Topics{
network.MakeTopic(network.ErrorKey, []byte(err.Error()))}
return
}
roundBytes, found := topics.GetValue(RoundKey)
if !found {
- logging.Base().Infof("BlockService handleCatchupReq: %s", noRoundNumberErrMsg)
+ bs.log.Infof("BlockService handleCatchupReq: %s", noRoundNumberErrMsg)
respTopics = network.Topics{
network.MakeTopic(network.ErrorKey,
[]byte(noRoundNumberErrMsg))}
@@ -266,7 +283,7 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
}
requestType, found := topics.GetValue(RequestDataTypeKey)
if !found {
- logging.Base().Infof("BlockService handleCatchupReq: %s", noDataTypeErrMsg)
+ bs.log.Infof("BlockService handleCatchupReq: %s", noDataTypeErrMsg)
respTopics = network.Topics{
network.MakeTopic(network.ErrorKey,
[]byte(noDataTypeErrMsg))}
@@ -275,23 +292,74 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
round, read := binary.Uvarint(roundBytes)
if read <= 0 {
- logging.Base().Infof("BlockService handleCatchupReq: %s", roundNumberParseErrMsg)
+ bs.log.Infof("BlockService handleCatchupReq: %s", roundNumberParseErrMsg)
respTopics = network.Topics{
network.MakeTopic(network.ErrorKey,
[]byte(roundNumberParseErrMsg))}
return
}
- respTopics = topicBlockBytes(bs.ledger, basics.Round(round), string(requestType))
+ respTopics = topicBlockBytes(bs.log, bs.ledger, basics.Round(round), string(requestType))
return
}
-func topicBlockBytes(dataLedger *data.Ledger, round basics.Round, requestType string) network.Topics {
+// redirectRequest redirects the request to the next round robin fallback endpoint if available, otherwise,
+// if EnableBlockServiceFallbackToArchiver is enabled, redirects to a random archiver.
+func (bs *BlockService) redirectRequest(round uint64, response http.ResponseWriter, request *http.Request) (ok bool) {
+ peerAddress := bs.getNextCustomFallbackEndpoint()
+ if peerAddress == "" && bs.enableArchiverFallback {
+ peerAddress = bs.getRandomArchiver()
+ }
+ if peerAddress == "" {
+ return false
+ }
+
+ parsedURL, err := network.ParseHostOrURL(peerAddress)
+ if err != nil {
+ bs.log.Debugf("redirectRequest: %s", err.Error())
+ return false
+ }
+ parsedURL.Path = FormatBlockQuery(round, parsedURL.Path, bs.net)
+ http.Redirect(response, request, parsedURL.String(), http.StatusTemporaryRedirect)
+ bs.log.Debugf("redirectRequest: redirected block request to %s", parsedURL.String())
+ return true
+}
+
+// getNextCustomFallbackEndpoint returns the next custom fallback endpoint in round-robin ordering
+func (bs *BlockService) getNextCustomFallbackEndpoint() (endpointAddress string) {
+ if len(bs.fallbackEndpoints.endpoints) == 0 {
+ return
+ }
+ endpointAddress = bs.fallbackEndpoints.endpoints[bs.fallbackEndpoints.lastUsed]
+ bs.fallbackEndpoints.lastUsed = (bs.fallbackEndpoints.lastUsed + 1) % len(bs.fallbackEndpoints.endpoints)
+ return
+}
+
+// getRandomArchiver returns a random archiver address
+func (bs *BlockService) getRandomArchiver() (endpointAddress string) {
+ peers := bs.net.GetPeers(network.PeersPhonebookArchivers)
+ httpPeers := make([]network.HTTPPeer, 0, len(peers))
+
+ for _, peer := range peers {
+ httpPeer, validHTTPPeer := peer.(network.HTTPPeer)
+ if validHTTPPeer {
+ httpPeers = append(httpPeers, httpPeer)
+ }
+ }
+ if len(httpPeers) == 0 {
+ return
+ }
+ randIndex := crypto.RandUint64() % uint64(len(httpPeers))
+ endpointAddress = httpPeers[randIndex].GetAddress()
+ return
+}
+
+func topicBlockBytes(log logging.Logger, dataLedger *data.Ledger, round basics.Round, requestType string) network.Topics {
blk, cert, err := dataLedger.EncodedBlockCert(round)
if err != nil {
switch err.(type) {
case ledgercore.ErrNoEntry:
default:
- logging.Base().Infof("BlockService topicBlockBytes: %s", err)
+ log.Infof("BlockService topicBlockBytes: %s", err)
}
return network.Topics{
network.MakeTopic(network.ErrorKey, []byte(blockNotAvailabeErrMsg))}
@@ -326,3 +394,24 @@ func RawBlockBytes(l *data.Ledger, round basics.Round) ([]byte, error) {
Certificate: cert,
}), nil
}
+
+// FormatBlockQuery formats a block request query for the given network and round number
+func FormatBlockQuery(round uint64, parsedURL string, net network.GossipNode) string {
+ return net.SubstituteGenesisID(path.Join(parsedURL, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(round), 36)))
+}
+
+func makeFallbackEndpoints(log logging.Logger, customFallbackEndpoints string) (fe fallbackEndpoints) {
+ if customFallbackEndpoints == "" {
+ return
+ }
+ endpoints := strings.Split(customFallbackEndpoints, ",")
+ for _, ep := range endpoints {
+ parsed, err := network.ParseHostOrURL(ep)
+ if err != nil {
+ log.Warnf("makeFallbackEndpoints: error parsing %s %s", ep, err.Error())
+ continue
+ }
+ fe.endpoints = append(fe.endpoints, parsed.String())
+ }
+ return
+}
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index 042988eff..343e5571c 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -18,10 +18,19 @@ package rpcs
import (
"context"
+ "fmt"
+ "net/http"
"testing"
+ "time"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
)
@@ -56,6 +65,7 @@ func TestHandleCatchupReqNegative(t *testing.T) {
}
ls := BlockService{
ledger: nil,
+ log: logging.TestingLog(t),
}
// case where topics is nil
@@ -99,3 +109,201 @@ func TestHandleCatchupReqNegative(t *testing.T) {
require.Equal(t, true, found)
require.Equal(t, roundNumberParseErrMsg, string(val))
}
+
+// TestRedirectFallbackArchiver tests the case when the block service redirects the request to a fallback archiver
+func TestRedirectFallbackArchiver(t *testing.T) {
+ log := logging.TestingLog(t)
+
+ ledger1 := makeLedger(t, "l1")
+ defer ledger1.Close()
+ ledger2 := makeLedger(t, "l2")
+ defer ledger2.Close()
+ addBlock(t, ledger1)
+ addBlock(t, ledger2)
+ addBlock(t, ledger2)
+
+ net1 := &httpTestPeerSource{}
+ net2 := &httpTestPeerSource{}
+
+ config := config.GetDefaultLocal()
+ bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}")
+ bs2 := MakeBlockService(log, config, ledger2, net2, "{genesisID}")
+
+ nodeA := &basicRPCNode{}
+ nodeB := &basicRPCNode{}
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeA.start()
+ defer nodeA.stop()
+
+ nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
+ nodeB.start()
+ defer nodeB.stop()
+
+ net1.addPeer(nodeB.rootURL())
+
+ parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
+ require.NoError(t, err)
+
+ client := http.Client{}
+
+ ctx := context.Background()
+ parsedURL.Path = FormatBlockQuery(uint64(2), parsedURL.Path, net1)
+ blockURL := parsedURL.String()
+ request, err := http.NewRequest("GET", blockURL, nil)
+ require.NoError(t, err)
+ requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
+ defer requestCancel()
+ request = request.WithContext(requestCtx)
+ network.SetUserAgentHeader(request.Header)
+ response, err := client.Do(request)
+ require.NoError(t, err)
+
+ require.Equal(t, http.StatusOK, response.StatusCode)
+}
+
+// TestRedirectFallbackEndpoints tests the case when the block service redirects the request to a configured fallback endpoint
+func TestRedirectFallbackEndpoints(t *testing.T) {
+ log := logging.TestingLog(t)
+
+ ledger1 := makeLedger(t, "l1")
+ defer ledger1.Close()
+ ledger2 := makeLedger(t, "l2")
+ defer ledger2.Close()
+ addBlock(t, ledger2)
+
+ net1 := &httpTestPeerSource{}
+ net2 := &httpTestPeerSource{}
+
+ nodeA := &basicRPCNode{}
+ nodeB := &basicRPCNode{}
+ nodeA.start()
+ defer nodeA.stop()
+ nodeB.start()
+ defer nodeB.stop()
+
+ config := config.GetDefaultLocal()
+ // Set the first to a bad address, the second to self, and the third to the one that has the block.
+ // If round-robin selection over the fallback endpoints works, the request should succeed.
+ config.BlockServiceCustomFallbackEndpoints = fmt.Sprintf("://badaddress,%s,%s", nodeA.rootURL(), nodeB.rootURL())
+ bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}")
+ bs2 := MakeBlockService(log, config, ledger2, net2, "{genesisID}")
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
+
+ parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
+ require.NoError(t, err)
+
+ client := http.Client{}
+
+ ctx := context.Background()
+ parsedURL.Path = FormatBlockQuery(uint64(1), parsedURL.Path, net1)
+ blockURL := parsedURL.String()
+ request, err := http.NewRequest("GET", blockURL, nil)
+ require.NoError(t, err)
+ requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
+ defer requestCancel()
+ request = request.WithContext(requestCtx)
+ network.SetUserAgentHeader(request.Header)
+ response, err := client.Do(request)
+ require.NoError(t, err)
+
+ require.Equal(t, http.StatusOK, response.StatusCode)
+}
+
+// TestRedirectExceptions tests exception cases:
+// - the case when the peer is not a valid http peer
+// - the case when the block service keeps redirecting and cannot get a block
+func TestRedirectExceptions(t *testing.T) {
+ log := logging.TestingLog(t)
+
+ ledger1 := makeLedger(t, "l1")
+ defer ledger1.Close()
+ addBlock(t, ledger1)
+
+ net1 := &httpTestPeerSource{}
+
+ config := config.GetDefaultLocal()
+ bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}")
+
+ nodeA := &basicRPCNode{}
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeA.start()
+ defer nodeA.stop()
+
+ net1.peers = append(net1.peers, "invalidPeer")
+
+ parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
+ require.NoError(t, err)
+
+ client := http.Client{}
+
+ ctx := context.Background()
+ parsedURL.Path = FormatBlockQuery(uint64(2), parsedURL.Path, net1)
+ blockURL := parsedURL.String()
+ request, err := http.NewRequest("GET", blockURL, nil)
+ require.NoError(t, err)
+ requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
+ defer requestCancel()
+ request = request.WithContext(requestCtx)
+ network.SetUserAgentHeader(request.Header)
+
+ response, err := client.Do(request)
+ require.NoError(t, err)
+ require.Equal(t, response.StatusCode, http.StatusNotFound)
+
+ net1.addPeer(nodeA.rootURL())
+ _, err = client.Do(request)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "stopped after 10 redirects")
+}
+
+var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
+
+func makeLedger(t *testing.T, namePostfix string) *data.Ledger {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ genesis := make(map[basics.Address]basics.AccountData)
+ genesis[sinkAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000},
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000},
+ }
+
+ log := logging.TestingLog(t)
+ genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genHash := crypto.Digest{0x42}
+ cfg := config.GetDefaultLocal()
+ const inMem = true
+
+ ledger, err := data.LoadLedger(
+ log, t.Name()+namePostfix, inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash,
+ nil, cfg,
+ )
+ require.NoError(t, err)
+ return ledger
+}
+
+func addBlock(t *testing.T, ledger *data.Ledger) {
+ blk, err := ledger.Block(ledger.LastRound())
+ require.NoError(t, err)
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.TxnRoot, err = blk.PaysetCommit()
+ require.NoError(t, err)
+
+ var cert agreement.Certificate
+ cert.Proposal.BlockDigest = blk.Digest()
+
+ err = ledger.AddBlock(blk, cert)
+ require.NoError(t, err)
+
+ hdr, err := ledger.BlockHdr(blk.BlockHeader.Round)
+ require.NoError(t, err)
+ require.Equal(t, blk.BlockHeader, hdr)
+}
diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go
index 8e984bf48..b43ea7f91 100644
--- a/rpcs/txService_test.go
+++ b/rpcs/txService_test.go
@@ -51,6 +51,11 @@ func (s *httpTestPeerSource) GetPeers(options ...network.PeerOption) []network.P
return s.peers
}
+func (s *httpTestPeerSource) addPeer(rootURL string) {
+ peer := testHTTPPeer(rootURL)
+ s.peers = append(s.peers, &peer)
+}
+
// implement network.HTTPPeer
type testHTTPPeer string
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index de4bc0293..69b517806 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -31,7 +31,6 @@ import (
"github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
@@ -130,9 +129,6 @@ func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txg
}
return client.client.txgroups, nil
}
-func (client *mockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- return nil, nil
-}
// network.HTTPPeer interface
func (client *mockRPCClient) GetAddress() string {
diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh
index d22468368..83e06af8f 100755
--- a/scripts/configure_dev.sh
+++ b/scripts/configure_dev.sh
@@ -70,12 +70,10 @@ function install_windows_shellcheck() {
if [ "${OS}" = "linux" ]; then
if ! which sudo > /dev/null; then
- apt-get update
- apt-get -y install sudo
+ "$SCRIPTPATH/install_linux_deps.sh"
+ else
+ sudo "$SCRIPTPATH/install_linux_deps.sh"
fi
-
- sudo apt-get update
- sudo apt-get install -y libboost-all-dev expect jq autoconf shellcheck sqlite3 python3-venv
elif [ "${OS}" = "darwin" ]; then
brew update
brew tap homebrew/cask
@@ -103,4 +101,3 @@ if ${SKIP_GO_DEPS}; then
fi
"$SCRIPTPATH/configure_dev-deps.sh"
-
diff --git a/scripts/create_and_deploy_recipe.sh b/scripts/create_and_deploy_recipe.sh
index 9a0e01c98..4c55edaf0 100755
--- a/scripts/create_and_deploy_recipe.sh
+++ b/scripts/create_and_deploy_recipe.sh
@@ -40,6 +40,7 @@ FORCE_OPTION=""
SCHEMA_MODIFIER=""
BUCKET=""
SKIP_BUILD=""
+BOOTSTRAP=""
while [ "$1" != "" ]; do
case "$1" in
@@ -73,6 +74,9 @@ while [ "$1" != "" ]; do
shift
BUCKET="$1"
;;
+ --skip-dbfiles)
+ BOOTSTRAP="false"
+ ;;
--skip-build)
SKIP_BUILD="true"
;;
@@ -108,7 +112,7 @@ if [[ "${SKIP_BUILD}" != "true" || ! -f ${GOPATH}/bin/netgoal ]]; then
fi
# Generate the nodecfg package directory
-${GOPATH}/bin/netgoal build -r "${ROOTDIR}" -n "${NETWORK}" --recipe "${RECIPEFILE}" "${FORCE_OPTION}" -m "${SCHEMA_MODIFIER}"
+${GOPATH}/bin/netgoal build -r "${ROOTDIR}" -n "${NETWORK}" --recipe "${RECIPEFILE}" "${FORCE_OPTION}" -m "${SCHEMA_MODIFIER}" -b=${BOOTSTRAP:-true}
# Package and upload the config package
export S3_RELEASE_BUCKET="${S3_RELEASE_BUCKET}"
diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh
new file mode 100755
index 000000000..ec7151378
--- /dev/null
+++ b/scripts/install_linux_deps.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+. /etc/os-release
+DISTRIB=$ID
+
+ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtualenv"
+UBUNTU_DEPS="libboost-all-dev expect jq autoconf shellcheck sqlite3 python3-venv"
+
+if [ "${DISTRIB}" = "arch" ]; then
+ pacman -S --refresh --needed --noconfirm $ARCH_DEPS
+else
+ apt-get update
+ apt-get -y install $UBUNTU_DEPS
+fi
diff --git a/scripts/release/mule/deploy/docker/docker.sh b/scripts/release/mule/deploy/docker/docker.sh
index a81a26306..f0b0ffc50 100755
--- a/scripts/release/mule/deploy/docker/docker.sh
+++ b/scripts/release/mule/deploy/docker/docker.sh
@@ -38,7 +38,8 @@ then
./build_releases.sh --tagname "$VERSION"
elif [ "$NETWORK" = betanet ]
then
- ./build_releases.sh --network betanet
+ ./build_releases.sh --network betanet
+ ./build_releases.sh --network betanet --tagname "$VERSION"
fi
popd
diff --git a/scripts/release/mule/deploy/releases_page/generate_releases_page.py b/scripts/release/mule/deploy/releases_page/generate_releases_page.py
index b5f76bbfa..af5fbd865 100755
--- a/scripts/release/mule/deploy/releases_page/generate_releases_page.py
+++ b/scripts/release/mule/deploy/releases_page/generate_releases_page.py
@@ -1,6 +1,16 @@
#!/usr/bin/env python3
-# This script builds https://releases.algorand.com/index.html.
+# This script builds https://releases.algorand.com/index.html
+#
+# For each channel (stable, beta, indexer), we download the file information
+# from the staging_bucket. Information from this bucket is used to create an
+# html block for each channel which includes all versions found.
+#
+# The releases_bucket is also read, and if the file exists there, then the
+# releases_bucket URL is used instead of the staging_bucket URL.
+#
+# All the HTML for the channels is combined to form one large release page,
+# which can then be published on our releases page.
import sys
import boto3
@@ -11,10 +21,15 @@ key_url = "https://releases.algorand.com/key.pub"
releases_bucket = "algorand-releases"
releases_prefix = "https://releases.algorand.com/"
html_tpl = "html.tpl"
+# Nit: should be styles_file
styles_url = "releases_page.css"
+# May want to call these channels instead
tokens = ["stable", "beta", "indexer"]
+
def get_stage_release_set(response):
+ # Loop through contents of STAGING_BUCKET/releases/CHANNEL/ and return
+ # all[prefix] = [file_obj1, file_obj2...]
prefix = None
all = {}
they = []
@@ -31,10 +46,15 @@ def get_stage_release_set(response):
else:
all[prefix] = they
prefix = None
+ # Why do the following instead of emptying 'they' altogether?
they = [x]
return all
+
def release_set_files(rset):
+ # Take list of file_objs, and return a files dict, keyed by filename
+ # value is a dict with keys "file" (full path), "Size", and if
+ # present, ".asc" or ".sig"
files = {}
for x in rset:
path = x["Key"]
@@ -43,17 +63,23 @@ def release_set_files(rset):
continue
didsuf = False
for suffix in (".asc", ".sig"):
+ # Check if signature file, e.g. node_beta_linux-amd64_2.5.2.tar.gz.sig
if fname.endswith(suffix):
+ # Get base filename, e.g. without '.sig'
froot = fname[:-len(suffix)]
+
fd = files.get(froot)
if fd is None:
fd = {}
files[froot] = fd
+ # key file dict by suffix, attach whole file object
fd[suffix] = x
didsuf = True
- break
+ break # end suffixes loop
if didsuf:
- continue
+ continue # go to next file in rset
+
+ # At this point we are not a sig file, so just attach raw information
fd = files.get(fname)
if fd is None:
fd = {}
@@ -62,23 +88,34 @@ def release_set_files(rset):
fd["Size"] = x["Size"]
return files
+
def get_hashes_data(s3, rset):
+ # Read all hashes files for a version and return text string
text = ""
for x in rset:
+ # x here are objects under a specific prefix
path = x["Key"]
pre, fname = path.rsplit("/", 1)
if fname.endswith(".asc"):
continue
if fname.endswith(".sig"):
continue
+
+ # We skip signature files and only process hashes files
+ # e.g. hashes_beta_linux_amd64_2.5.2
+ # We read and append all of this data in the 'text' string and return
+ # it
if fname.startswith("hashes"):
ob = s3.get_object(Bucket=staging_bucket, Key=path)
text += ob["Body"].read().decode()
return text
+
def read_hashes(fin):
+ # Read the output of get_hashes_data
by_fname = {}
for line in fin:
+ # Ignore blanks and comments
if not line:
continue
line = line.strip()
@@ -86,11 +123,21 @@ def read_hashes(fin):
continue
if line[0] == "#":
continue
+
+ # E.g.:
+ # 7e19496802ca7f3bec68ba580ccb7042
+ # algorand-beta-2.5.2-1.x86_64.rpm
hashstr, fname = line.split()
ob = by_fname.get(fname)
+
+ # If the filename is not in by_fname, create an empty dict and assign
+ # it
if not ob:
ob = {}
by_fname[fname] = ob
+
+ # if 32 chars, it's md5; 64 is sha256, 128 is sha512. Assign to dict
+ # under those keys
if len(hashstr) == 32:
ob["md5"] = hashstr
elif len(hashstr) == 64:
@@ -99,6 +146,7 @@ def read_hashes(fin):
ob["sha512"] = hashstr
return by_fname
+
def objects_by_fname(they):
out = {}
for x in they:
@@ -110,53 +158,103 @@ def objects_by_fname(they):
out[fname] = x
return out
+
def getContent(url):
with open(url, "r") as reader:
content = reader.read()
return content
+
def build_page(channels):
+ # read html_tpl and styles_url, make substitutions
html = getContent(html_tpl).replace("{styles}", getContent(styles_url))
+ # Replace each token (channel) from channels
for n in tokens:
html = html.replace("".join(["{", n, "}"]), "".join(channels[n]))
sys.stdout.write(html)
+
def get_furl(release_files, fname, skey):
+ # Pass s3://algorand-releases/ file objects; also the filename and path
+ # from s3://algorand-dev-deb-repo.
+ #
+ # If the filename is in the algorand-releases bucket, use the url from the
+ # releases bucket. Otherwise, use the URL from the
+ # s3://algorand-dev-deb-repo bucket.
+ #
+ # algorand-releases and algorand-dev-deb-repo match:
+ # https://releases.algorand.com/[rpath]
+ # Else:
+ # http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/[spath]
rfpath = release_files.get(fname)
if rfpath is not None:
return releases_prefix + rfpath["Key"]
else:
return staging_prefix + skey
+
def main():
s3 = boto3.client("s3")
channels = {}
+ # Should use tokens array instead
for channel in ["stable", "beta", "indexer"]:
- staging_response = s3.list_objects_v2(Bucket=staging_bucket, Prefix="releases/" + channel + "/", MaxKeys=100)
+ # Fetch contents of e.g. s3://algorand-dev-deb-repo/releases/beta/
+ # Note: MaxKeys will limit to last 100 releases, which is more than
+ # enough. Consider dropping this to 2.
+ staging_response = s3.list_objects_v2(
+ Bucket=staging_bucket,
+ Prefix="releases/" + channel + "/", MaxKeys=100)
+
+ # Populate release_sets, e.g.:
+ # 'releases/beta/f9fa9a084_2.5.2' => [file_obj1, file_obj2, ...]
release_sets = get_stage_release_set(staging_response)
+
+ # List everything from the releases bucket s3://algorand-releases/
releases_response = s3.list_objects_v2(Bucket=releases_bucket)
+
+ # Return dict keyed by filename of file_objs from
+ # s3://algorand-releases/
release_files = objects_by_fname(releases_response["Contents"])
table = []
+ # Loop through all the releases in e.g.
+ # s3://algorand-dev-deb-repo/releases/beta/
for key, rset in release_sets.items():
+ # key: releases/beta/f9fa9a084_2.5.2
+ # rset: [file_obj1, file_obj2, ...]
+
+ # Scan rset objs and return all the hashes data as a string
hashftext = get_hashes_data(s3, rset)
+
+ # Create a dict of fhashes[filename] = hash_obj
+ # hash_obj[CHECKSUM] = HASH_STRING
+ # E.g. hash_obj['md5'] = '7e19496802ca7f3bec68ba580ccb7042'
fhashes = read_hashes(hashftext.splitlines())
+
+ # Build a dict keyed by filename with value of a dict, keyed by
+ # "file" (full path) and "Size"
files = release_set_files(rset)
for fname, info in files.items():
if "file" not in info:
continue
+
+ # Use algorand-releases URL if avail; otherwise
+ # algorand-dev-deb-repo URL
furl = get_furl(release_files, fname, info['file'])
+
ftext = '<div class="fname"><a href="{}">{}</a></div>'.format(furl, fname)
+ # sig file obj from algorand-dev-deb-repo
sig = info.get(".sig")
stext = ""
if sig is not None:
- sfname = sig["Key"].rsplit("/", 1)[-1]
+ sfname = sig["Key"].rsplit("/", 1)[-1] # filename
+ # Use algorand-releases URL if available
surl = get_furl(release_files, sfname, sig["Key"])
stext = '<a href="{}">.sig</a>'.format(surl)
size = info.get("Size", "")
@@ -172,12 +270,12 @@ def main():
table.append("".join(tbody))
# Only add the spacer *after* every set.
- # It's not readily apparent to me why `indexer` would have a dict with a single
- # item. This needs additional investigation.
+ # It's not readily apparent to me why `indexer` would have a dict
+ # with a single item. This needs additional investigation.
#
- # For instance, when creating the "indexer" table, the first line was empty b/c
- # it added a spacer. This was b/c there were two dicts and the first only
- # contained one item, which was useless.
+ # For instance, when creating the "indexer" table, the first line
+ # was empty b/c it added a spacer. This was b/c there were two
+ # dicts and the first only contained one item, which was useless.
#
# For now, just ignore those dicts.
if len(files.items()) > 1:
@@ -187,6 +285,6 @@ def main():
build_page(channels)
+
if __name__ == "__main__":
main()
-
diff --git a/scripts/release/mule/package/deb/package.sh b/scripts/release/mule/package/deb/package.sh
index bf6a5dacf..6cd9f653e 100755
--- a/scripts/release/mule/package/deb/package.sh
+++ b/scripts/release/mule/package/deb/package.sh
@@ -12,7 +12,7 @@ if [ -z "$NETWORK" ]; then
exit 1
fi
-CHANNEL=$("./scripts/release/mule/common/get_channel.sh" "$NETWORK")
+CHANNEL=${CHANNEL:-$(./scripts/release/mule/common/get_channel.sh "$NETWORK")}
VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)}
# A make target in Makefile.mule may pass the name as an argument.
PACKAGE_NAME="$1"
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 877994f9d..a58aa4ce6 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -130,7 +130,9 @@ func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64
// Step 1) Create X assets for each of the participant accounts
// Step 2) For each participant account, opt-in to assets of all other participant accounts
// Step 3) Evenly distribute the assets across all participant accounts
-func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) {
+func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client libgoal.Client) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) {
+ accounts := assetAccounts
+ cfg := pps.cfg
proto, err := getProto(client)
if err != nil {
return
@@ -353,7 +355,7 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf
fmt.Printf("Distributing assets from %v to %v \n", creator, addr)
}
- tx, sendErr := constructTxn(creator, addr, cfg.MaxFee, assetAmt, k, CreatablesInfo{}, client, cfg)
+ tx, sendErr := pps.constructTxn(creator, addr, cfg.MaxFee, assetAmt, k, client)
if sendErr != nil {
fmt.Printf("Cannot transfer asset %v from account %v\n", k, creator)
err = sendErr
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index 48b3586e4..21841ed20 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -61,6 +61,11 @@ type PpConfig struct {
AppLocalKeys uint32
Rekey bool
MaxRuntime time.Duration
+
+ // asset spam; make lots of NFT ASAs
+ NftAsaPerSecond uint32 // e.g. 100
+ NftAsaPerAccount uint32 // 0..999
+ NftAsaAccountInFlight uint32
}
// DefaultConfig object for Ping Pong
@@ -88,6 +93,9 @@ var DefaultConfig = PpConfig{
AppProgHashSize: "sha256",
Rekey: false,
MaxRuntime: 0,
+
+ NftAsaAccountInFlight: 5,
+ NftAsaPerAccount: 900,
}
// LoadConfigFromFile reads and loads Ping Pong configuration
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 0ffc97af2..81feac1cf 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -24,6 +24,7 @@ import (
"os"
"time"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/basics"
@@ -38,14 +39,25 @@ type CreatablesInfo struct {
OptIns map[uint64][]string
}
+// WorkerState object holds a running pingpong worker
+type WorkerState struct {
+ cfg PpConfig
+ accounts map[string]uint64
+ cinfo CreatablesInfo
+
+ nftStartTime int64
+ localNftIndex uint64
+ nftHolders map[string]int
+}
+
// PrepareAccounts to set up accounts and asset accounts required for Ping Pong run
-func PrepareAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]uint64, cinfo CreatablesInfo, cfg PpConfig, err error) {
- cfg = initCfg
- accounts, cfg, err = ensureAccounts(ac, cfg)
+func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
+ pps.accounts, pps.cfg, err = ensureAccounts(ac, pps.cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ensure accounts failed %v\n", err)
return
}
+ cfg := pps.cfg
wallet, walletErr := ac.GetUnencryptedWalletHandle()
if err != nil {
@@ -58,48 +70,49 @@ func PrepareAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]u
cfg.MaxAmt = 0
var assetAccounts map[string]uint64
- assetAccounts, err = prepareNewAccounts(ac, cfg, wallet, accounts)
+ assetAccounts, err = prepareNewAccounts(ac, cfg, wallet, pps.accounts)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
return
}
- cinfo.AssetParams, cinfo.OptIns, err = prepareAssets(assetAccounts, ac, cfg)
+ pps.cinfo.AssetParams, pps.cinfo.OptIns, err = pps.prepareAssets(assetAccounts, ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare assets failed %v\n", err)
return
}
if !cfg.Quiet {
- for addr := range accounts {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, accounts[addr])
+ for addr := range pps.accounts {
+ fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr])
}
}
} else if cfg.NumApp > 0 {
var appAccounts map[string]uint64
- appAccounts, err = prepareNewAccounts(ac, cfg, wallet, accounts)
+ appAccounts, err = prepareNewAccounts(ac, cfg, wallet, pps.accounts)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
return
}
- cinfo.AppParams, cinfo.OptIns, err = prepareApps(appAccounts, ac, cfg)
+ pps.cinfo.AppParams, pps.cinfo.OptIns, err = prepareApps(appAccounts, ac, cfg)
if err != nil {
return
}
if !cfg.Quiet {
- for addr := range accounts {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, accounts[addr])
+ for addr := range pps.accounts {
+ fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr])
}
}
} else {
- err = fundAccounts(accounts, ac, cfg)
+ err = fundAccounts(pps.accounts, ac, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
return
}
}
+ pps.cfg = cfg
return
}
@@ -268,8 +281,10 @@ func listSufficientAccounts(accounts map[string]uint64, minimumAmount uint64, ex
return out
}
+var logPeriod = 5 * time.Second
+
// RunPingPong starts ping pong process
-func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uint64, cinfo CreatablesInfo, cfg PpConfig) {
+func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
// Infinite loop given:
// - accounts -> map of accounts to include in transfers (including src account, which we don't want to use)
// - cfg -> configuration for how to proceed
@@ -286,6 +301,7 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin
// error = fundAccounts()
// }
+ cfg := pps.cfg
var runTime time.Duration
if cfg.RunTime > 0 {
runTime = cfg.RunTime
@@ -299,6 +315,14 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin
restTime := cfg.RestTime
refreshTime := time.Now().Add(cfg.RefreshTime)
+ var nftThrottler *throttler
+ if pps.cfg.NftAsaPerSecond > 0 {
+ nftThrottler = newThrottler(20, float64(pps.cfg.NftAsaPerSecond))
+ }
+
+ lastLog := time.Now()
+ nextLog := lastLog.Add(logPeriod)
+
for {
if ctx.Err() != nil {
_, _ = fmt.Fprintf(os.Stderr, "error bad context in RunPingPong: %v\n", ctx.Err())
@@ -307,22 +331,46 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin
startTime := time.Now()
stopTime := startTime.Add(runTime)
- var totalSent, totalSucceeded uint64
- for !time.Now().After(stopTime) {
+ var totalSent, totalSucceeded, lastTotalSent uint64
+ for {
+ now := time.Now()
+ if now.After(stopTime) {
+ break
+ }
+ if now.After(nextLog) {
+ dt := now.Sub(lastLog)
+ fmt.Printf("%d sent, %0.2f/s (%d total)\n", totalSent-lastTotalSent, float64(totalSent-lastTotalSent)/dt.Seconds(), totalSent)
+ lastTotalSent = totalSent
+ for now.After(nextLog) {
+ nextLog = nextLog.Add(logPeriod)
+ }
+ lastLog = now
+ }
+
if cfg.MaxRuntime > 0 && time.Now().After(endTime) {
fmt.Printf("Terminating after max run time of %.f seconds\n", cfg.MaxRuntime.Seconds())
return
}
+ if pps.cfg.NftAsaPerSecond > 0 {
+ sent, err := pps.makeNftTraffic(ac)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error sending nft transactions: %v\n", err)
+ }
+ nftThrottler.maybeSleep(int(sent))
+ totalSent += sent
+ continue
+ }
+
minimumAmount := cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*2
- fromList := listSufficientAccounts(accounts, minimumAmount, cfg.SrcAccount)
+ fromList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
// in group tests txns are sent back and forth, so both parties need funds
if cfg.GroupSize == 1 {
minimumAmount = 0
}
- toList := listSufficientAccounts(accounts, minimumAmount, cfg.SrcAccount)
+ toList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- sent, succeeded, err := sendFromTo(fromList, toList, accounts, cinfo, ac, cfg)
+ sent, succeeded, err := pps.sendFromTo(fromList, toList, ac)
totalSent += sent
totalSucceeded += succeeded
if err != nil {
@@ -330,7 +378,7 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin
}
if cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
- err = refreshAccounts(accounts, ac, cfg)
+ err = refreshAccounts(pps.accounts, ac, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error refreshing: %v\n", err)
}
@@ -350,6 +398,11 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin
}
}
+// NewPingpong creates a new pingpong WorkerState
+func NewPingpong(cfg PpConfig) *WorkerState {
+ return &WorkerState{cfg: cfg, nftHolders: make(map[string]int)}
+}
+
func getCreatableID(cfg PpConfig, cinfo CreatablesInfo) (aidx uint64) {
if cfg.NumAsset > 0 {
rindex := rand.Intn(len(cinfo.AssetParams))
@@ -375,14 +428,96 @@ func getCreatableID(cfg PpConfig, cinfo CreatablesInfo) (aidx uint64) {
return
}
-func sendFromTo(
- fromList, toList []string, accounts map[string]uint64,
- cinfo CreatablesInfo,
- client libgoal.Client, cfg PpConfig,
+func (pps *WorkerState) fee() uint64 {
+ cfg := pps.cfg
+ fee := cfg.MaxFee
+ if cfg.RandomizeFee {
+ fee = rand.Uint64()%(cfg.MaxFee-cfg.MinFee) + cfg.MinFee
+ }
+ return fee
+}
+
+func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64, err error) {
+ fee := pps.fee()
+ if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) {
+ var addr string
+ var wallet []byte
+ wallet, err = client.GetUnencryptedWalletHandle()
+ if err != nil {
+ return
+ }
+ addr, err = client.GenerateAddress(wallet)
+ if err != nil {
+ return
+ }
+ fmt.Printf("new NFT holder %s\n", addr)
+ var proto config.ConsensusParams
+ proto, err = getProto(client)
+ if err != nil {
+ return
+ }
+ // enough for the per-asa minbalance and more than enough for the txns to create them
+ toSend := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
+ pps.nftHolders[addr] = 0
+ _, err = sendPaymentFromUnencryptedWallet(client, pps.cfg.SrcAccount, addr, fee, toSend, nil)
+ if err != nil {
+ return
+ }
+ sentCount++
+ // we ran one txn above already to fund the new addr,
+ // we'll run a second txn below
+ }
+ // pick a random sender from nft holder sub accounts
+ pick := rand.Intn(len(pps.nftHolders))
+ pos := 0
+ var sender string
+ var senderNftCount int
+ for addr, nftCount := range pps.nftHolders {
+ sender = addr
+ senderNftCount = nftCount
+ if pos == pick {
+ break
+ }
+ pos++
+
+ }
+ var meta [32]byte
+ rand.Read(meta[:])
+ assetName := pps.nftSpamAssetName()
+ const totalSupply = 1
+ txn, err := client.MakeUnsignedAssetCreateTx(totalSupply, false, sender, sender, sender, sender, "ping", assetName, "", meta[:], 0)
+ if err != nil {
+ fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
+ return
+ }
+ txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, pps.cfg.MaxFee, txn)
+ if err != nil {
+ fmt.Printf("Cannot fill asset creation txn\n")
+ return
+ }
+ if senderNftCount+1 >= int(pps.cfg.NftAsaPerAccount) {
+ delete(pps.nftHolders, sender)
+ } else {
+ pps.nftHolders[sender] = senderNftCount + 1
+ }
+ stxn, err := signTxn(sender, txn, client, pps.cfg)
+ if err != nil {
+ return
+ }
+ sentCount++
+ _, err = client.BroadcastTransaction(stxn)
+ return
+}
+
+func (pps *WorkerState) sendFromTo(
+ fromList, toList []string,
+ client libgoal.Client,
) (sentCount, successCount uint64, err error) {
+ accounts := pps.accounts
+ cinfo := pps.cinfo
+ cfg := pps.cfg
amt := cfg.MaxAmt
- fee := cfg.MaxFee
assetsByCreator := make(map[string][]*v1.AssetParams)
for _, p := range cinfo.AssetParams {
@@ -394,9 +529,7 @@ func sendFromTo(
amt = rand.Uint64()%cfg.MaxAmt + 1
}
- if cfg.RandomizeFee {
- fee = rand.Uint64()%(cfg.MaxFee-cfg.MinFee) + cfg.MinFee
- }
+ fee := pps.fee()
to := toList[i]
if cfg.RandomizeDst {
@@ -417,7 +550,7 @@ func sendFromTo(
// generate random assetID or appId if we send asset/app txns
aidx := getCreatableID(cfg, cinfo)
// Construct single txn
- txn, consErr := constructTxn(from, to, fee, amt, aidx, cinfo, client, cfg)
+ txn, consErr := pps.constructTxn(from, to, fee, amt, aidx, client)
if consErr != nil {
err = consErr
_, _ = fmt.Fprintf(os.Stderr, "constructTxn failed: %v\n", err)
@@ -460,17 +593,17 @@ func sendFromTo(
var txn transactions.Transaction
var signer string
if j%2 == 0 {
- txn, err = constructTxn(from, to, fee, amt, 0, cinfo, client, cfg)
+ txn, err = pps.constructTxn(from, to, fee, amt, 0, client)
fromBalanceChange -= int64(txn.Fee.Raw + amt)
toBalanceChange += int64(amt)
signer = from
} else if cfg.GroupSize == 2 && cfg.Rekey {
- txn, err = constructTxn(from, to, fee, amt, 0, cinfo, client, cfg)
+ txn, err = pps.constructTxn(from, to, fee, amt, 0, client)
fromBalanceChange -= int64(txn.Fee.Raw + amt)
toBalanceChange += int64(amt)
signer = to
} else {
- txn, err = constructTxn(to, from, fee, amt, 0, cinfo, client, cfg)
+ txn, err = pps.constructTxn(to, from, fee, amt, 0, client)
toBalanceChange -= int64(txn.Fee.Raw + amt)
fromBalanceChange += int64(amt)
signer = to
@@ -553,7 +686,17 @@ func sendFromTo(
return
}
-func constructTxn(from, to string, fee, amt, aidx uint64, cinfo CreatablesInfo, client libgoal.Client, cfg PpConfig) (txn transactions.Transaction, err error) {
+func (pps *WorkerState) nftSpamAssetName() string {
+ if pps.nftStartTime == 0 {
+ pps.nftStartTime = time.Now().Unix()
+ }
+ pps.localNftIndex++
+ return fmt.Sprintf("nft%d_%d", pps.nftStartTime, pps.localNftIndex)
+}
+
+func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, client libgoal.Client) (txn transactions.Transaction, err error) {
+ cfg := pps.cfg
+ cinfo := pps.cinfo
var noteField []byte
const pingpongTag = "pingpong"
const tagLen = uint32(len(pingpongTag))
@@ -675,3 +818,53 @@ func signTxn(signer string, txn transactions.Transaction, client libgoal.Client,
}
return
}
+
+type timeCount struct {
+ when time.Time
+ count int
+}
+
+type throttler struct {
+ times []timeCount
+
+ next int
+
+ // target x per-second
+ xps float64
+
+ // rough proportional + integral control
+ iterm float64
+}
+
+func newThrottler(windowSize int, targetPerSecond float64) *throttler {
+ return &throttler{times: make([]timeCount, windowSize), xps: targetPerSecond, iterm: 0.0}
+}
+
+func (t *throttler) maybeSleep(count int) {
+ now := time.Now()
+ t.times[t.next].when = now
+ t.times[t.next].count = count
+ nn := (t.next + 1) % len(t.times)
+ t.next = nn
+ if t.times[nn].when.IsZero() {
+ return
+ }
+ dt := now.Sub(t.times[nn].when)
+ countsum := 0
+ for i, tc := range t.times {
+ if i != nn {
+ countsum += tc.count
+ }
+ }
+ rate := float64(countsum) / dt.Seconds()
+ if rate > t.xps {
+ // rate too high, slow down
+ desiredSeconds := float64(countsum) / t.xps
+ extraSeconds := desiredSeconds - dt.Seconds()
+ t.iterm += 0.1 * extraSeconds / float64(len(t.times))
+ time.Sleep(time.Duration(int64(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times)))))
+
+ } else {
+ t.iterm *= 0.95
+ }
+}
diff --git a/test/README.md b/test/README.md
index 0061fb0f1..59e5760f6 100644
--- a/test/README.md
+++ b/test/README.md
@@ -29,7 +29,7 @@ Must run from the root project directory, `./test/scripts/e2e.sh`
## scripts/e2e_client_runner.py and scripts/e2e_subs/
-These tests are shell scripts which all run in series against a single private network.
+These tests are shell scripts which all run in parallel against a single private network.
Each script is provided with a wallet which contains a large supply of algos to use during the test.
```
@@ -48,5 +48,7 @@ optional arguments:
To run a specific test:
```
-~$ ./e2e_client_runner.py full/path/to/test_script.sh
+~$ ./e2e_client_runner.py /full/path/to/e2e_subs/test_script.sh
```
+
+Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary.
diff --git a/test/commandandcontrol/cc_agent/component/agent.go b/test/commandandcontrol/cc_agent/component/agent.go
index 64143992a..f467c9303 100644
--- a/test/commandandcontrol/cc_agent/component/agent.go
+++ b/test/commandandcontrol/cc_agent/component/agent.go
@@ -138,7 +138,7 @@ func (status CommandStatus) String() string {
// ProcessRequest processes the command received via the CC Service
func (agent *Agent) ProcessRequest(managementServiceRequest lib.CCServiceRequest) (err error) {
- log.Infof("recieved command for %s\n", managementServiceRequest.Component)
+ log.Infof("received command for %s\n", managementServiceRequest.Component)
err = agent.ServiceConnection.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf("received request %+v ", managementServiceRequest)))
if err != nil {
log.Errorf("problem sending ack to client , %v", err)
diff --git a/test/commandandcontrol/cc_agent/component/pingPongComponent.go b/test/commandandcontrol/cc_agent/component/pingPongComponent.go
index 07aa1a5f8..155060c97 100644
--- a/test/commandandcontrol/cc_agent/component/pingPongComponent.go
+++ b/test/commandandcontrol/cc_agent/component/pingPongComponent.go
@@ -120,13 +120,11 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.
log.Infof("Preparing to initialize PingPong with config: %+v\n", cfg)
- var accounts map[string]uint64
- var cinfo pingpong.CreatablesInfo
- var resultCfg pingpong.PpConfig
+ pps := pingpong.NewPingpong(*cfg)
// Initialize accounts if necessary, this may take several attempts while previous transactions to settle
for i := 0; i < 10; i++ {
- accounts, cinfo, resultCfg, err = pingpong.PrepareAccounts(ac, *cfg)
+ err = pps.PrepareAccounts(ac)
if err == nil {
break
} else {
@@ -145,7 +143,7 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.
componentInstance.ctx, componentInstance.cancelFunc = context.WithCancel(context.Background())
// Kick off the real processing
- go pingpong.RunPingPong(componentInstance.ctx, ac, accounts, cinfo, resultCfg)
+ go pps.RunPingPong(componentInstance.ctx, ac)
return
}
diff --git a/test/commandandcontrol/cc_service/main.go b/test/commandandcontrol/cc_service/main.go
index 06e3ac422..e42c16933 100644
--- a/test/commandandcontrol/cc_service/main.go
+++ b/test/commandandcontrol/cc_service/main.go
@@ -103,11 +103,11 @@ func monitorAgent(ws *websocket.Conn) {
}
switch messageType {
case websocket.TextMessage:
- log.Infof("recieved text from agent: %s", message)
+ log.Infof("received text from agent: %s", message)
clientBroadcast <- message
break
default:
- log.Infof("recieved other from agent: %s", message)
+ log.Infof("received other from agent: %s", message)
break
}
}
diff --git a/test/e2e-go/cli/algod/cleanup_test.go b/test/e2e-go/cli/algod/cleanup_test.go
index e893a68e7..310a8e82d 100644
--- a/test/e2e-go/cli/algod/cleanup_test.go
+++ b/test/e2e-go/cli/algod/cleanup_test.go
@@ -28,7 +28,7 @@ import (
func TestNodeControllerCleanup(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesPartialPartkeyOnlyWallets.json"))
diff --git a/test/e2e-go/cli/algod/stdstreams_test.go b/test/e2e-go/cli/algod/stdstreams_test.go
index 0a8f33f47..27b87fcb8 100644
--- a/test/e2e-go/cli/algod/stdstreams_test.go
+++ b/test/e2e-go/cli/algod/stdstreams_test.go
@@ -44,7 +44,7 @@ func TestAlgodLogsToFile(t *testing.T) {
}
func testNodeCreatesLogFiles(t *testing.T, nc nodecontrol.NodeController, redirect bool) {
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
stdOutFile := filepath.Join(nc.GetDataDir(), nodecontrol.StdOutFilename)
exists := util.FileExists(stdOutFile)
diff --git a/test/e2e-go/cli/goal/account_test.go b/test/e2e-go/cli/goal/account_test.go
index 7afc124c2..492b4c29d 100644
--- a/test/e2e-go/cli/goal/account_test.go
+++ b/test/e2e-go/cli/goal/account_test.go
@@ -29,7 +29,7 @@ const statusOnline = "[online]"
func TestAccountNew(t *testing.T) {
defer fixture.SetTestContext(t)()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
newAcctName := "new_account"
@@ -54,7 +54,7 @@ func TestAccountNew(t *testing.T) {
func TestAccountNewDuplicateFails(t *testing.T) {
defer fixture.SetTestContext(t)()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
newAcctName := "duplicate_account"
@@ -69,7 +69,7 @@ func TestAccountNewDuplicateFails(t *testing.T) {
func TestAccountRename(t *testing.T) {
defer fixture.SetTestContext(t)()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
initialAcctName := "initial"
newAcctName := "renamed"
@@ -99,7 +99,7 @@ func TestAccountRename(t *testing.T) {
// Importing an account multiple times should not be considered an error by goal
func TestAccountMultipleImportRootKey(t *testing.T) {
defer fixture.SetTestContext(t)()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
walletName := ""
createUnencryptedWallet := false
diff --git a/test/e2e-go/cli/goal/clerk_test.go b/test/e2e-go/cli/goal/clerk_test.go
index 2ee2a618d..5ddc11d63 100644
--- a/test/e2e-go/cli/goal/clerk_test.go
+++ b/test/e2e-go/cli/goal/clerk_test.go
@@ -22,11 +22,13 @@ import (
"time"
"github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/framework/fixtures"
)
func TestClerkSendNoteEncoding(t *testing.T) {
defer fixture.SetTestContext(t)()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
// wait for consensus on first round prior to sending transactions, time out after 2 minutes
err := fixture.WaitForRound(2, time.Duration(2*time.Minute))
diff --git a/test/e2e-go/cli/goal/expect/README.md b/test/e2e-go/cli/goal/expect/README.md
index 3053632c2..ea3dd026d 100644
--- a/test/e2e-go/cli/goal/expect/README.md
+++ b/test/e2e-go/cli/goal/expect/README.md
@@ -56,7 +56,7 @@ There are three (optional) environment variables that can be used to control the
set the filter to be `export TESTFILTER=[b,c]ar`.
- Defaults to all tests (`.*`).
-NOTE: the file name shoud have the suffix: "Test.exp"
+NOTE: the file name should have the suffix: "Test.exp"
To run the Goal Expect test, run the following command from the top level go-algorand directory:
diff --git a/test/e2e-go/cli/goal/expect/goalAccountTest.exp b/test/e2e-go/cli/goal/expect/goalAccountTest.exp
index 0b5999cc4..b50227284 100644
--- a/test/e2e-go/cli/goal/expect/goalAccountTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalAccountTest.exp
@@ -32,6 +32,33 @@ if { [catch {
# Determine primary account
set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR]
+ # try to generate an expired transaction for 5 times before giving up.
+ set TEST_TRANSACTION_EXPIRATION 5
+ while {$TEST_TRANSACTION_EXPIRATION > 0} {
+ # Get the latest block
+ set LAST_COMMITTED_BLOCK [::AlgorandGoal::GetNodeLastCommittedBlock $TEST_PRIMARY_NODE_DIR]
+
+ # test that sending a transaction where the last round is equal to the current round ends up resulting in "Transaction %s expired before it could be included in a block" error.
+ spawn goal clerk send -a 10 --fee 1000 --firstvalid [expr {$LAST_COMMITTED_BLOCK + 1}] --lastvalid [expr {$LAST_COMMITTED_BLOCK + 1}] -f $PRIMARY_ACCOUNT_ADDRESS -t $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR
+ expect {
+ timeout { close; ::AlgorandGoal::Abort "goal clerk send timeout" }
+ -re {Transaction ([A-Z0-9]+) expired before it could be included in a block} {
+ break;
+ close;
+ }
+ -re {Transaction ([A-Z0-9]+) kicked out of local node pool} {
+ # this is a legit possible case, so just keep iterating if we hit this one.
+ close;
+ }
+ -re {Couldn't broadcast tx with algod: HTTP 400 Bad Request: TransactionPool.Remember: txn dead: round ([0-9]+) outside of ([0-9]+)--([0-9]+)} {
+ # this is a legit possible case, so just keep iterating if we hit this one.
+ close;
+ }
+ eof { ::AlgorandGoal::CheckEOF "Failed to send a dummy transaction" }
+ }
+ set TEST_TRANSACTION_EXPIRATION [expr {$TEST_TRANSACTION_EXPIRATION - 1}]
+ }
+
set MN "advice pudding treat near rule blouse same whisper inner electric quit surface sunny dismiss leader blood seat clown cost exist hospital century reform able sponsor"
spawn goal account import -m $MN --datadir $TEST_PRIMARY_NODE_DIR
expect {
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index 7a14e84f6..8a4ee7305 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -46,13 +46,18 @@ package require Tcl 8.0
proc ::AlgorandGoal::Abort { ERROR } {
puts "Aborting with Error: $ERROR"
- if { "$::GLOBAL_TEST_ALGO_DIR" != "" && "$::GLOBAL_TEST_ROOT_DIR" != "" } {
+ if { [info exists ::GLOBAL_TEST_ROOT_DIR] } {
# terminate child algod processes, if there are active child processes the test will hang on a test failure
- puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR"
puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"
::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
}
+
+ if { [info exists ::GLOBAL_TEST_ALGO_DIR] } {
+ puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
+ ::AlgorandGoal::StopNode $::GLOBAL_TEST_ALGO_DIR
+ }
+
exit 1
}
@@ -424,11 +429,21 @@ proc ::AlgorandGoal::GetAccountRewards { WALLET_NAME ACCOUNT_ADDRESS TEST_PRIMAR
# Account Transfer
proc ::AlgorandGoal::AccountTransfer { FROM_WALLET_NAME FROM_WALLET_PASSWORD FROM_ACCOUNT_ADDRESS TRANSFER_AMOUNT TO_ACCOUNT_ADDRESS FEE_AMOUNT TEST_PRIMARY_NODE_DIR OUT_FILE } {
+ set TRANSACTION_ID [::AlgorandGoal::AccountTransferWait $FROM_WALLET_NAME $FROM_WALLET_PASSWORD $FROM_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $TO_ACCOUNT_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR $OUT_FILE "true"]
+ return $TRANSACTION_ID
+}
+
+# Account Transfer (with optional wait flag)
+proc ::AlgorandGoal::AccountTransferWait { FROM_WALLET_NAME FROM_WALLET_PASSWORD FROM_ACCOUNT_ADDRESS TRANSFER_AMOUNT TO_ACCOUNT_ADDRESS FEE_AMOUNT TEST_PRIMARY_NODE_DIR OUT_FILE WAIT} {
set timeout 60
if { [ catch {
set TRANSACTION_ID "NOT SET"
if { $OUT_FILE == "" } {
- spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR
+ if { $WAIT == "" } {
+ spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --no-wait
+ } else {
+ spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR
+ }
} else {
spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --out $OUT_FILE
}
@@ -804,7 +819,7 @@ proc ::AlgorandGoal::DeleteMultisigAccount { MULTISIG_ADDRESS TEST_PRIMARY_NODE_
}
}
-# Wait for node to reach a specific round
+# Retrieve the node last catchpoint
proc ::AlgorandGoal::GetNodeLastCatchpoint { NODE_DATA_DIR } {
set CATCHPOINT ""
if { [catch {
@@ -825,6 +840,28 @@ proc ::AlgorandGoal::GetNodeLastCatchpoint { NODE_DATA_DIR } {
return $CATCHPOINT
}
+
+# Get node's last reached round
+proc ::AlgorandGoal::GetNodeLastCommittedBlock { NODE_DATA_DIR } {
+ set COMMITTEDROUND ""
+ if { [catch {
+ # Check node status
+ puts "spawn node status"
+ spawn goal node status -d $NODE_DATA_DIR
+ expect {
+ timeout { ::AlgorandGoal::Abort "goal node status timed out" }
+ -re {Last committed block: ([0-9]+)} {regexp -- {[0-9]+} $expect_out(0,string) COMMITTEDROUND; exp_continue }
+ eof { catch wait result; if { [lindex $result 3] != 0 } { ::AlgorandGoal::Abort "failed to perform goal node status : error code [lindex $result 3]"} }
+ }
+ if { $COMMITTEDROUND == "" } {
+ ::AlgorandGoal::Abort "Last committed block entry was missing from goal node status"
+ }
+ } EXCEPTION ] } {
+ ::AlgorandGoal::Abort "ERROR in GetNodeLastCommittedBlock: $EXCEPTION"
+ }
+ return $COMMITTEDROUND
+}
+
# Start catching up to a specific catchpoint
proc ::AlgorandGoal::StartCatchup { NODE_DATA_DIR CATCHPOINT } {
if { [catch {
diff --git a/test/e2e-go/cli/goal/expect/goalNodeTest.exp b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
index 6a2f94406..9bfe6b48e 100644
--- a/test/e2e-go/cli/goal/expect/goalNodeTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
@@ -30,6 +30,23 @@ if { [catch {
# Start node
::AlgorandGoal::StartNode $TEST_PRIMARY_NODE_DIR
+ # Try starting the node again, should just report the node is already running
+ set ALREADY_STARTED_MESSAGE_RECEIVED 0
+ spawn goal node start -d $TEST_PRIMARY_NODE_DIR
+ expect {
+ timeout { close; ::AlgorandGoal::Abort "goal node start unexpectedly failed" }
+ "^Algorand node was already started!" {
+ set ALREADY_STARTED_MESSAGE_RECEIVED 1
+ exp_continue
+ }
+ -re "\\S+" { close; ::AlgorandGoal::Abort "Unexpected message for goal node start on a running node" }
+ eof {
+ if {$ALREADY_STARTED_MESSAGE_RECEIVED == 0} {
+ close; ::AlgorandGoal::Abort "eof received before the expected message"
+ }
+ }
+ }
+
# Restart node
::AlgorandGoal::RestartNode $TEST_PRIMARY_NODE_DIR
diff --git a/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp b/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp
new file mode 100644
index 000000000..e0990ce12
--- /dev/null
+++ b/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp
@@ -0,0 +1,72 @@
+#!/usr/bin/expect -f
+#exp_internal 1
+set err 0
+log_user 1
+
+if { [catch {
+
+ source goalExpectCommon.exp
+ set TEST_ALGO_DIR [lindex $argv 0]
+ set TEST_DATA_DIR [lindex $argv 1]
+
+ puts "TEST_ALGO_DIR: $TEST_ALGO_DIR"
+ puts "TEST_DATA_DIR: $TEST_DATA_DIR"
+
+ set TIME_STAMP [clock seconds]
+
+ set TEST_ROOT_DIR $TEST_ALGO_DIR/root
+ set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/
+ set NETWORK_NAME test_net_expect_$TIME_STAMP
+ set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/SingleNodeNetwork.json"
+
+ # Create network
+ ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+
+ # Start network
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
+
+ # Determine primary account
+ set PRIMARY_WALLET_NAME unencrypted-default-wallet
+ set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR]
+
+ # Check the primary account balance.
+ set INITIAL_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR]
+
+ # set the destination as a random address
+ set DEST_ACCOUNT "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ"
+
+ # send a series of transactions without waiting on each, then verify below that they all get applied.
+ set EXPECTED_BALANCE $INITIAL_ACCOUNT_BALANCE
+ set TRANSFER_AMOUNT_BASE 1000000
+ set FEE_AMOUNT 1000
+ set TRANSACTION_COUNT 10
+ for {set txIdx 0} {$txIdx < $TRANSACTION_COUNT} {incr txIdx 1} {
+ set TRANSFER_AMOUNT [expr $TRANSFER_AMOUNT_BASE+$txIdx]
+ set TRANSACTION_ID [::AlgorandGoal::AccountTransferWait $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $DEST_ACCOUNT $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR "" ""]
+ set EXPECTED_BALANCE [expr $EXPECTED_BALANCE-$FEE_AMOUNT-$TRANSFER_AMOUNT]
+ }
+
+ # Get node last committed round
+ set LAST_COMMITTED_ROUND [::AlgorandGoal::GetNodeLastCommittedBlock $TEST_PRIMARY_NODE_DIR]
+
+ # Wait for node to advance to next round.
+ ::AlgorandGoal::WaitForRound [expr $LAST_COMMITTED_ROUND+2] $TEST_PRIMARY_NODE_DIR
+
+ # Check the primary account balance.
+ set ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR]
+
+ # Shutdown the network
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
+
+ if { $EXPECTED_BALANCE != $ACCOUNT_BALANCE } {
+ puts "Node was supposed to have a balance of $EXPECTED_BALANCE but ended up with a balance of $ACCOUNT_BALANCE"
+ exit 1
+ }
+
+ puts "Single Node Network Goal Test Successful"
+
+ exit 0
+} EXCEPTION] } {
+ puts "ERROR in singleNodeNetworkTest: $EXCEPTION"
+ exit 1
+}
diff --git a/test/e2e-go/cli/goal/expect/tealConsensusTest.exp b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp
new file mode 100755
index 000000000..c8f7e84a2
--- /dev/null
+++ b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp
@@ -0,0 +1,107 @@
+#!/usr/bin/expect -f
+set err 0
+log_user 1
+
+# put a TEAL program into f, with at least size and cost.
+proc teal {f v size cost {prefix ""}} {
+ set CHAN [open $f w]
+ puts $CHAN "#pragma version $v\n"
+ puts $CHAN $prefix
+ for {set i 5} {$i < $size} {incr i 2} {
+ puts $CHAN "int 1\npop\n"
+ }
+
+ if {$cost > [expr $size * 2]} {
+ puts $CHAN "byte 0x1234\n"
+ for {set i [expr $size * 2]} {$i < $cost} {incr i 130} {
+ puts $CHAN "keccak256\n"
+ }
+ puts $CHAN "pop\n"
+ }
+ puts $CHAN "int 1\n"
+ close $CHAN
+}
+
+if { [catch {
+ source goalExpectCommon.exp
+ set TEST_ALGO_DIR [lindex $argv 0]
+ set TEST_DATA_DIR [lindex $argv 1]
+
+ puts "TEST_ALGO_DIR: $TEST_ALGO_DIR"
+ puts "TEST_DATA_DIR: $TEST_DATA_DIR"
+
+ set TIME_STAMP [clock seconds]
+
+ set TEST_ROOT_DIR $TEST_ALGO_DIR/root
+ set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/
+ set NETWORK_NAME test_net_expect_$TIME_STAMP
+ set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json"
+
+ # Create network
+ ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+
+ # Start network
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
+
+
+ # Test various program length limits during compile
+
+ teal "$TEST_ROOT_DIR/small-sig.teal" 2 100 1
+ spawn goal clerk compile "$TEST_ROOT_DIR/small-sig.teal"
+ expect {
+ -re {[A-Z2-9]{58}} { set SMALL_SIG $expect_out(0,string) }
+ "\n" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ }
+
+ teal "$TEST_ROOT_DIR/big-sig.teal" 2 1001 1
+ spawn goal clerk compile "$TEST_ROOT_DIR/big-sig.teal"
+ expect {
+ -re {[A-Z2-9]{58}} { ::AlgorandGoal::Abort "hash" }
+ -re {.*logicsig program size too large} { puts "bigsigcheck: pass" }
+ "\n" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ }
+
+ teal "$TEST_ROOT_DIR/barely-fits-app.teal" 2 1001 1 "int 0\nbalance\npop\n"
+ spawn goal clerk compile "$TEST_ROOT_DIR/barely-fits-app.teal"
+ expect {
+ -re {[A-Z2-9]{58}} { puts "hash $expect_out(0,string)" }
+ "\n" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ }
+
+ teal "$TEST_ROOT_DIR/big-app.teal" 2 1025 1 "int 0\nbalance\npop\n"
+ spawn goal clerk compile "$TEST_ROOT_DIR/big-app.teal"
+ expect {
+ -re {[A-Z2-9]{58}} { ::AlgorandGoal::Abort "hash" }
+ -re {.*app program size too large} { puts "bigappcheck: pass" }
+ "\n" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ }
+
+ # Test cost limits during dryrun
+ exec goal clerk send -F "$TEST_ROOT_DIR/small-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/small-sig.tx
+ spawn goal clerk dryrun -t $TEST_ROOT_DIR/small-sig.tx
+ expect {
+ " - pass -" { puts "small-sig dryrun pass" }
+ "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ "static cost budget" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ }
+
+ teal "$TEST_ROOT_DIR/slow-sig.teal" 2 1 20001
+ exec goal clerk compile "$TEST_ROOT_DIR/slow-sig.teal"
+ exec goal clerk send -F "$TEST_ROOT_DIR/slow-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/slow-sig.tx
+ spawn goal clerk dryrun -P future -t $TEST_ROOT_DIR/slow-sig.tx ;# Should succeed Check, fail Eval
+ expect {
+ "dynamic cost budget" { puts "slow-sig dryrun pass" }
+ " - pass -" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) }
+ }
+
+ # Shutdown the network
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
+
+ puts "TEAL Consensus Test Successful"
+
+ exit 0
+
+} EXCEPTION ] } {
+ ::AlgorandGoal::Abort "ERROR in tealConsensusTest: $EXCEPTION"
+}
diff --git a/test/e2e-go/cli/goal/node_cleanup_test.go b/test/e2e-go/cli/goal/node_cleanup_test.go
index 00d06c00b..857ad76c3 100644
--- a/test/e2e-go/cli/goal/node_cleanup_test.go
+++ b/test/e2e-go/cli/goal/node_cleanup_test.go
@@ -22,11 +22,12 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/nodecontrol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
)
func TestGoalNodeCleanup(t *testing.T) {
defer fixture.SetTestContext(t)()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
primaryDir := fixture.PrimaryDataDir()
nc := nodecontrol.MakeNodeController(fixture.GetBinDir(), primaryDir)
diff --git a/test/e2e-go/cli/perf/libgoal_test.go b/test/e2e-go/cli/perf/libgoal_test.go
index 8ebc471df..817ddeef5 100644
--- a/test/e2e-go/cli/perf/libgoal_test.go
+++ b/test/e2e-go/cli/perf/libgoal_test.go
@@ -34,19 +34,20 @@ func BenchmarkLibGoalPerf(b *testing.B) {
binDir := fixture.GetBinDir()
c, err := libgoal.MakeClientWithBinDir(binDir, fixture.PrimaryDataDir(), fixture.PrimaryDataDir(), libgoal.FullClient)
- require.NoError(b, err)
+ a := require.New(fixtures.SynchronizedTest(b))
+ a.NoError(err)
b.Run("algod", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := c.AlgodVersions()
- require.NoError(b, err)
+ a.NoError(err)
}
})
b.Run("kmd", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := c.GetUnencryptedWalletHandle()
- require.NoError(b, err)
+ a.NoError(err)
}
})
}
diff --git a/test/e2e-go/cli/perf/payment_test.go b/test/e2e-go/cli/perf/payment_test.go
index ed785fdba..bf5a7e8a1 100644
--- a/test/e2e-go/cli/perf/payment_test.go
+++ b/test/e2e-go/cli/perf/payment_test.go
@@ -35,21 +35,23 @@ func BenchmarkSendPayment(b *testing.B) {
defer fixture.Shutdown()
binDir := fixture.GetBinDir()
+ a := require.New(fixtures.SynchronizedTest(b))
+
c, err := libgoal.MakeClientWithBinDir(binDir, fixture.PrimaryDataDir(), fixture.PrimaryDataDir(), libgoal.FullClient)
- require.NoError(b, err)
+ a.NoError(err)
wallet, err := c.GetUnencryptedWalletHandle()
- require.NoError(b, err)
+ a.NoError(err)
addrs, err := c.ListAddresses(wallet)
- require.NoError(b, err)
- require.True(b, len(addrs) > 0)
+ a.NoError(err)
+ a.True(len(addrs) > 0)
addr := addrs[0]
b.Run("getwallet", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err = c.GetUnencryptedWalletHandle()
- require.NoError(b, err)
+ a.NoError(err)
}
})
@@ -59,14 +61,14 @@ func BenchmarkSendPayment(b *testing.B) {
var nonce [8]byte
crypto.RandBytes(nonce[:])
tx, err = c.ConstructPayment(addr, addr, 1, 1, nonce[:], "", [32]byte{}, 0, 0)
- require.NoError(b, err)
+ a.NoError(err)
}
})
b.Run("signtxn", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err = c.SignTransactionWithWallet(wallet, nil, tx)
- require.NoError(b, err)
+ a.NoError(err)
}
})
@@ -75,7 +77,7 @@ func BenchmarkSendPayment(b *testing.B) {
var nonce [8]byte
crypto.RandBytes(nonce[:])
_, err := c.SendPaymentFromWallet(wallet, nil, addr, addr, 1, 1, nonce[:], "", 0, 0)
- require.NoError(b, err)
+ a.NoError(err)
}
})
}
diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp
index d18315cdc..9be2c95ba 100644
--- a/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp
+++ b/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp
@@ -56,6 +56,6 @@ if { [catch {
}
} EXCEPTION ] } {
- puts "ERROR in teadbgTest: $EXCEPTION"
+ puts "ERROR in tealdbgTest: $EXCEPTION"
exit 1
}
diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp
index 7be2f9066..a4fc773eb 100644
--- a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp
+++ b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp
@@ -2,29 +2,14 @@
set err 0
log_user 1
-if { [catch {
-
- set TEST_ALGO_DIR [lindex $argv 0]
- set timeout 30
-
- set TEST_DIR $TEST_ALGO_DIR
- exec mkdir -p $TEST_DIR
-
- set TEAL_PROG_FILE "$TEST_DIR/trivial.teal"
-
- # this is ConsensusV25
- set PROTOCOL_VERSION_2 "https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466"
-
- # this is ConsensusV26
- set PROTOCOL_VERSION_3 "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff"
-
- # run the test using version 2:
-
- exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE
+# workaround for scoping issue in TestTealdbg and setting URL inside expect_background's re scope
+set URL ""
+proc TestTealdbg { TEAL_PROG_FILE PROTOCOL_VERSION ARGS } {
+ variable URL
set URL ""
set PASSED 0
- spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_2
+ spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION {*}[lrange $ARGS 0 end]
expect_background {
timeout { puts "tealdbg debug timed out"; exit 1 }
-re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); }
@@ -63,55 +48,50 @@ if { [catch {
puts "Shutting down tealdbg"
close -i $tealdbg_spawn_id
- exec rm $TEAL_PROG_FILE
+}
- # run the test using version 3:
+if { [catch {
- exec printf "#pragma version 3\nint 1\ndup\n+\n" > $TEAL_PROG_FILE
+ set TEST_ALGO_DIR [lindex $argv 0]
+ set timeout 30
- set URL ""
- set PASSED 0
- spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_3
- expect_background {
- timeout { puts "tealdbg debug timed out"; exit 1 }
- -re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); }
- eof {
- catch wait result
- if { [lindex $result 3] != 0 } {
- puts "returned error code is [lindex $result 3]"
- exit 1
- }
- }
- }
- set tealdbg_spawn_id $spawn_id
+ set TEST_DIR $TEST_ALGO_DIR
+ exec mkdir -p $TEST_DIR
- # wait until URL is set or timeout
- set it 0
- while { $it < 10 && $URL == "" } {
- set it [expr {$it + 1}]
- sleep 1
- }
- if { $URL == "" } {
- puts "ERROR: URL is not set after timeout"
- exit 1
- }
+ set TEAL_PROG_FILE "$TEST_DIR/trivial.teal"
+ # this is ConsensusV25
+ set PROTOCOL_VERSION_2 "https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466"
- spawn cdtmock $URL
+ # this is ConsensusV26
+ set PROTOCOL_VERSION_3 "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff"
+
+ # run the test using version 2 on protocol version 2:
+ exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE
+ TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_2 ""
+
+ # run the test using version 2 on protocol version 3:
+ TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_3 "--remote-debugging-port 9392 --listen 127.0.0.1"
+
+ # run the test using version 3 on protocol version 3:
+ exec printf "#pragma version 3\nint 1\ndup\n+\n" > $TEAL_PROG_FILE
+ TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_3 "--remote-debugging-port 9392 --listen 127.0.0.1"
+
+ # run the test using version 3 on protocol version 2 (this should fail)
+ set FAILED 0
+ spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_2 --remote-debugging-port 9392 --listen 127.0.0.1
expect {
- timeout { puts "cdt-mock debug timed out"; exit 1 }
- -re {Debugger.paused} { set PASSED 1; }
- eof { catch wait result; if { [lindex $result 3] == 0 } { puts "Expected non-zero exit code"; exit [lindex $result 3] } }
+ timeout { puts "tealdbg debug timed out"; exit 1 }
+ -re {Debug error: Program version \([0-9]+\) is beyond the maximum supported protocol version \([0-9]+\)} { set FAILED 1; close }
}
-
- if { $PASSED == 0 } {
- puts "ERROR: have not found 'Debugger.paused' in cdtmock output"
+ if { $FAILED == 0 } {
+ puts "ERROR: the command should have failed"
exit 1
}
+ puts "The command failed as expected"
- puts "Shutting down tealdbg"
- close -i $tealdbg_spawn_id
+ exec rm $TEAL_PROG_FILE
} EXCEPTION ] } {
- puts "ERROR in teadbgTest: $EXCEPTION"
+ puts "ERROR in tealdbgTest: $EXCEPTION"
exit 1
}
diff --git a/test/e2e-go/features/auction/auctionCancel_test.go b/test/e2e-go/features/auction/auctionCancel_test.go
index 2d652b38a..b7b03d8aa 100644
--- a/test/e2e-go/features/auction/auctionCancel_test.go
+++ b/test/e2e-go/features/auction/auctionCancel_test.go
@@ -31,7 +31,7 @@ func TestStartAndCancelAuctionNoBids(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "ThreeNodesEvenDist.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -62,7 +62,7 @@ func TestStartAndCancelAuctionOneUserTenBids(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -122,7 +122,7 @@ func TestStartAndCancelAuctionOneUserTenBids(t *testing.T) {
func TestStartAndCancelAuctionEarlyOneUserTenBids(t *testing.T) {
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
diff --git a/test/e2e-go/features/auction/auctionErrors_test.go b/test/e2e-go/features/auction/auctionErrors_test.go
index 3251223f3..1ef1ef6a2 100644
--- a/test/e2e-go/features/auction/auctionErrors_test.go
+++ b/test/e2e-go/features/auction/auctionErrors_test.go
@@ -35,7 +35,7 @@ func TestInvalidDeposit(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
@@ -123,7 +123,7 @@ func TestNoDepositAssociatedWithBid(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
@@ -192,7 +192,7 @@ func TestNoDepositAssociatedWithBid(t *testing.T) {
func TestDeadbeatBid(t *testing.T) {
// an error is expected when an account attempts to overbid
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
@@ -290,7 +290,7 @@ func TestStartAndPartitionAuctionTenUsersTenBidsEach(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -299,7 +299,7 @@ func TestStartAndPartitionAuctionTenUsersTenBidsEach(t *testing.T) {
libGoalClient := fixture.GetLibGoalClient()
minTxnFee, minAcctBalance, err := fixture.CurrentMinFeeAndBalance()
- require.NoError(t, err)
+ r.NoError(err)
// create wallets to bid with, and note their balances before the auction.
wallets, _ := fixture.GetWalletsSortedByBalance()
diff --git a/test/e2e-go/features/auction/basicAuction_test.go b/test/e2e-go/features/auction/basicAuction_test.go
index d833ae7d9..4c055763b 100644
--- a/test/e2e-go/features/auction/basicAuction_test.go
+++ b/test/e2e-go/features/auction/basicAuction_test.go
@@ -43,7 +43,7 @@ func TestStartAndEndAuctionNoBids(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "ThreeNodesEvenDist.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -84,7 +84,7 @@ func TestStartAndEndAuctionOneUserOneBid(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -153,7 +153,7 @@ func TestStartAndEndAuctionOneUserTenBids(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -222,7 +222,7 @@ func TestStartAndEndAuctionOneUserTenBids(t *testing.T) {
func TestStartAndEndAuctionTenUsersOneBidEach(t *testing.T) {
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -317,7 +317,7 @@ func TestStartAndEndAuctionTenUsersTenBidsEach(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json")
@@ -326,7 +326,7 @@ func TestStartAndEndAuctionTenUsersTenBidsEach(t *testing.T) {
libGoalClient := fixture.GetLibGoalClient()
minTxnFee, minAcctBalance, err := fixture.CurrentMinFeeAndBalance()
- require.NoError(t, err)
+ r.NoError(err)
// create wallets to bid with, and note their balances before the auction.
wallets, _ := fixture.GetWalletsSortedByBalance()
@@ -414,7 +414,7 @@ func TestDecayingPrice(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.AuctionFixture
netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json")
// "price goes from 10 to 1, decreasing by 1 each block for 10 blocks."
diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go
index 7eb7a9b0c..32370a928 100644
--- a/test/e2e-go/features/catchup/basicCatchup_test.go
+++ b/test/e2e-go/features/catchup/basicCatchup_test.go
@@ -35,7 +35,7 @@ func TestBasicCatchup(t *testing.T) {
t.Skip()
}
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
// Overview of this test:
// Start a two-node network (primary has 0%, secondary has 100%)
@@ -78,13 +78,14 @@ func TestBasicCatchup(t *testing.T) {
func TestCatchupOverGossip(t *testing.T) {
t.Parallel()
+ syncTest := fixtures.SynchronizedTest(t)
supportedVersions := network.SupportedProtocolVersions
- require.LessOrEqual(t, len(supportedVersions), 3)
+ require.LessOrEqual(syncTest, len(supportedVersions), 3)
// ledger node upgraded version, fetcher node upgraded version
// Run with the default values. Instead of "", pass the default value
- // to exercise loading it from the config file.
- runCatchupOverGossip(t, supportedVersions[0], supportedVersions[0])
+ // to exercise loading it from the config file.
+ runCatchupOverGossip(syncTest, supportedVersions[0], supportedVersions[0])
for i := 1; i < len(supportedVersions); i++ {
runCatchupOverGossip(t, supportedVersions[i], "")
runCatchupOverGossip(t, "", supportedVersions[i])
@@ -92,7 +93,7 @@ func TestCatchupOverGossip(t *testing.T) {
}
}
-func runCatchupOverGossip(t *testing.T,
+func runCatchupOverGossip(t fixtures.TestingTB,
ledgerNodeDowngradeTo,
fetcherNodeDowngradeTo string) {
@@ -111,13 +112,13 @@ func runCatchupOverGossip(t *testing.T,
// distribution for catchup so this is fine.
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
- if ledgerNodeDowngradeTo != ""{
+ if ledgerNodeDowngradeTo != "" {
// Force the node to only support v1
dir, err := fixture.GetNodeDir("Node")
a.NoError(err)
cfg, err := config.LoadConfigFromDisk(dir)
a.NoError(err)
- require.Empty(t, cfg.NetworkProtocolVersion)
+ a.Empty(cfg.NetworkProtocolVersion)
cfg.NetworkProtocolVersion = ledgerNodeDowngradeTo
cfg.SaveToDisk(dir)
}
@@ -127,7 +128,7 @@ func runCatchupOverGossip(t *testing.T,
dir := fixture.PrimaryDataDir()
cfg, err := config.LoadConfigFromDisk(dir)
a.NoError(err)
- require.Empty(t, cfg.NetworkProtocolVersion)
+ a.Empty(cfg.NetworkProtocolVersion)
cfg.NetworkProtocolVersion = fetcherNodeDowngradeTo
cfg.SaveToDisk(dir)
}
@@ -177,7 +178,7 @@ func runCatchupOverGossip(t *testing.T,
if time.Now().Sub(waitStart) > time.Minute {
// it's taking too long.
- require.FailNow(t, "Waiting too long for catchup to complete")
+ a.FailNow("Waiting too long for catchup to complete")
}
time.Sleep(50 * time.Millisecond)
@@ -198,7 +199,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) {
t.Skip()
}
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
consensus := make(config.ConsensusProtocols)
// The following two protocols: testUnupgradedProtocol and testUnupgradedToProtocol
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index 9297f13a0..38a6ecd40 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -41,7 +41,7 @@ type nodeExitErrorCollector struct {
errors []error
messages []string
mu deadlock.Mutex
- t *testing.T
+ t fixtures.TestingTB
}
func (ec *nodeExitErrorCollector) nodeExitWithError(nc *nodecontrol.NodeController, err error) {
@@ -82,7 +82,7 @@ func TestBasicCatchpointCatchup(t *testing.T) {
if testing.Short() {
t.Skip()
}
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
log := logging.TestingLog(t)
// Overview of this test:
@@ -115,7 +115,7 @@ func TestBasicCatchpointCatchup(t *testing.T) {
var fixture fixtures.RestClientFixture
fixture.SetConsensus(consensus)
- errorsCollector := nodeExitErrorCollector{t: t}
+ errorsCollector := nodeExitErrorCollector{t: fixtures.SynchronizedTest(t)}
defer errorsCollector.Print()
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go
index 690e5d710..d8148c5bb 100644
--- a/test/e2e-go/features/compactcert/compactcert_test.go
+++ b/test/e2e-go/features/compactcert/compactcert_test.go
@@ -36,7 +36,7 @@ import (
func TestCompactCerts(t *testing.T) {
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
configurableConsensus := make(config.ConsensusProtocols)
consensusVersion := protocol.ConsensusVersion("test-fast-compactcert")
@@ -56,13 +56,36 @@ func TestCompactCerts(t *testing.T) {
restClient, err := fixture.NC.AlgodClient()
r.NoError(err)
+ node0Client := fixture.GetLibGoalClientForNamedNode("Node0")
+ node0Wallet, err := node0Client.GetUnencryptedWalletHandle()
+ r.NoError(err)
+ node0AccountList, err := node0Client.ListAddresses(node0Wallet)
+ r.NoError(err)
+ node0Account := node0AccountList[0]
+
+ node1Client := fixture.GetLibGoalClientForNamedNode("Node1")
+ node1Wallet, err := node1Client.GetUnencryptedWalletHandle()
+ r.NoError(err)
+ node1AccountList, err := node1Client.ListAddresses(node1Wallet)
+ r.NoError(err)
+ node1Account := node1AccountList[0]
+
var lastCertBlock v1.Block
libgoal := fixture.LibGoalClient
for rnd := uint64(1); rnd <= consensusParams.CompactCertRounds*4; rnd++ {
- fixture.WaitForRound(rnd, 30*time.Second)
- blk, err := libgoal.Block(rnd)
+ // send a dummy payment transaction.
+ minTxnFee, _, err := fixture.CurrentMinFeeAndBalance()
r.NoError(err)
+ _, err = node0Client.SendPaymentFromUnencryptedWallet(node0Account, node1Account, minTxnFee, rnd, nil)
+ r.NoError(err)
+
+ err = fixture.WaitForRound(rnd, 30*time.Second)
+ r.NoError(err)
+
+ blk, err := libgoal.Block(rnd)
+ r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
t.Logf("Round %d, block %v\n", rnd, blk)
if (rnd % consensusParams.CompactCertRounds) == 0 {
@@ -127,5 +150,5 @@ func TestCompactCerts(t *testing.T) {
}
}
- r.True(lastCertBlock.Round == consensusParams.CompactCertRounds*3)
+ r.Equalf(consensusParams.CompactCertRounds*3, lastCertBlock.Round, "the expected last certificate block wasn't the one that was observed")
}
diff --git a/test/e2e-go/features/multisig/multisig_test.go b/test/e2e-go/features/multisig/multisig_test.go
index 189af4f06..c24b4bc5f 100644
--- a/test/e2e-go/features/multisig/multisig_test.go
+++ b/test/e2e-go/features/multisig/multisig_test.go
@@ -37,7 +37,7 @@ func TestBasicMultisig(t *testing.T) {
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer fixture.Shutdown()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
// create three addrs
client := fixture.LibGoalClient
@@ -112,7 +112,7 @@ func TestZeroThreshold(t *testing.T) {
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer fixture.Shutdown()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
client := fixture.LibGoalClient
walletHandle, err := client.GetUnencryptedWalletHandle()
r.NoError(err, "Getting default wallet handle should not return error")
@@ -139,7 +139,7 @@ func TestZeroSigners(t *testing.T) {
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer fixture.Shutdown()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
client := fixture.LibGoalClient
walletHandle, err := client.GetUnencryptedWalletHandle()
r.NoError(err, "Getting default wallet handle should not return error")
@@ -162,7 +162,7 @@ func TestDuplicateKeys(t *testing.T) {
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer fixture.Shutdown()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
// create one addr
client := fixture.LibGoalClient
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index 99a5afb4d..b6e3296ce 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -29,7 +29,7 @@ import (
func TestParticipationKeyOnlyAccountParticipatesCorrectly(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesPartialPartkeyOnlyWallets.json"))
@@ -105,7 +105,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
t.Skip() // temporary disable the test since it's failing.
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesOneOnline.json"))
diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
new file mode 100644
index 000000000..e64fecfad
--- /dev/null
+++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
@@ -0,0 +1,209 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+func TestOverlappingParticipationKeys(t *testing.T) {
+ t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensus := make(config.ConsensusProtocols)
+ shortPartKeysProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
+ shortPartKeysProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ shortPartKeysProtocol.SeedLookback = 2
+ shortPartKeysProtocol.SeedRefreshInterval = 8
+ if runtime.GOARCH == "amd64" {
+ // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ shortPartKeysProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
+ shortPartKeysProtocol.AgreementFilterTimeout = 1 * time.Second
+ }
+ consensus[protocol.ConsensusVersion("shortpartkeysprotocol")] = shortPartKeysProtocol
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "ShortParticipationKeys.json"))
+ defer fixture.Shutdown()
+
+ accountsNum := len(fixture.NodeDataDirs())
+ for _, dataDir := range fixture.NodeDataDirs() {
+ cfg, err := config.LoadConfigFromDisk(dataDir)
+ a.NoError(err)
+ cfg.ParticipationKeysRefreshInterval = 500 * time.Millisecond
+ err = cfg.SaveToDisk(dataDir)
+ a.NoError(err)
+ }
+
+ genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(fixture.PrimaryDataDir(), "genesis.json"))
+ a.NoError(err)
+ genesisHash := crypto.HashObj(genesis)
+ rootKeys := make(map[int]*account.Root)
+ regTransactions := make(map[int]transactions.SignedTxn)
+ lastRound := uint64(64)
+
+ // prepare the participation keys ahead of time.
+ for round := uint64(1); round < lastRound; round++ {
+ if (round-1)%10 >= uint64(accountsNum) {
+ continue
+ }
+ acctIdx := (round - 1) % 10
+ txStartRound := round
+ txEndRound := txStartRound + 36 + 10
+ regStartRound := round + 32
+ regEndRound := regStartRound + 11
+ err = prepareParticipationKey(a, &fixture, acctIdx, txStartRound, txEndRound, regStartRound, regEndRound, genesisHash, rootKeys, regTransactions)
+ a.NoError(err)
+ }
+
+ fixture.Start()
+ currentRound := uint64(0)
+ fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.NC)
+ for {
+ err := fixture.WaitForRoundWithTimeout(currentRound + 1)
+ a.NoError(err)
+ currentRound++
+ if (currentRound-1)%10 < uint64(accountsNum) {
+ acctIdx := (currentRound - 1) % 10
+ startRound := currentRound + 2
+ endRound := startRound + 36 + 10 - 2
+ regStartRound := currentRound + 32
+ regEndRound := regStartRound + 11
+ err = addParticipationKey(a, &fixture, acctIdx, startRound, endRound, regTransactions)
+ a.NoError(err)
+ t.Logf("[.] Round %d, Added reg key for node %d range [%d..%d]\n", currentRound, acctIdx, regStartRound, regEndRound)
+ } else {
+ t.Logf("[.] Round %d\n", currentRound)
+ }
+
+ if currentRound == lastRound {
+ break
+ }
+ }
+
+}
+
+func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, startRound, endRound uint64, regTransactions map[int]transactions.SignedTxn) error {
+ dataDir := fixture.NodeDataDirs()[acctNum]
+ nc := fixture.GetNodeControllerForDataDir(dataDir)
+ genesisDir, err := nc.GetGenesisDir()
+
+ partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", startRound, endRound))
+ partKeyNameTarget := filepath.Join(genesisDir, config.PartKeyFilename("Wallet", startRound, endRound))
+
+ // make the rename in the background to ensure it won't take too long. We have ~32 rounds to complete this.
+ go os.Rename(partKeyName, partKeyNameTarget)
+
+ signedTxn := regTransactions[int(startRound-2)]
+ a.NotEmpty(signedTxn.Sig)
+ _, err = fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn)
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+ return err
+}
+
+func prepareParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, txStartRound, txEndRound, regStartRound, regEndRound uint64, genesisHash crypto.Digest, rootKeys map[int]*account.Root, regTransactions map[int]transactions.SignedTxn) error {
+ dataDir := fixture.NodeDataDirs()[acctNum]
+
+ nc := fixture.GetNodeControllerForDataDir(dataDir)
+ genesisDir, err := nc.GetGenesisDir()
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+ var rootAccount account.Root
+ if _, have := rootKeys[int(acctNum)]; !have {
+ var rootKeyFilename string
+ err = filepath.Walk(genesisDir, func(path string, f os.FileInfo, errIn error) error {
+ if errIn != nil {
+ return errIn
+ }
+ if f.IsDir() {
+ return nil
+ }
+ if config.IsRootKeyFilename(f.Name()) {
+ rootKeyFilename = path
+ }
+ return nil
+ })
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+
+ rootKeyHandle, err := db.MakeAccessor(rootKeyFilename, false, false)
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+
+ // generate a new participation key.
+ rootAccount, err = account.RestoreRoot(rootKeyHandle)
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+ rootKeys[int(acctNum)] = &rootAccount
+ rootKeyHandle.Close()
+ }
+ rootAccount = *rootKeys[int(acctNum)]
+
+ partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", txStartRound+2, txEndRound))
+
+ partkeyHandle, err := db.MakeAccessor(partKeyName, false, false)
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+
+ persistedPerticipation, err := account.FillDBWithParticipationKeys(partkeyHandle, rootAccount.Address(), basics.Round(regStartRound), basics.Round(regEndRound), fixture.LibGoalFixture.Genesis().PartKeyDilution)
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+ partkeyHandle.Vacuum(context.Background())
+ persistedPerticipation.Close()
+
+ unsignedTxn := persistedPerticipation.GenerateRegistrationTransaction(basics.MicroAlgos{Raw: 1000}, basics.Round(txStartRound), basics.Round(txEndRound), [32]byte{})
+ copy(unsignedTxn.GenesisHash[:], genesisHash[:])
+ if err != nil {
+ a.NoError(err)
+ return err
+ }
+ regTransactions[int(txStartRound)] = unsignedTxn.Sign(rootAccount.Secrets())
+ return err
+}
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index 68a540ebe..055feb407 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -42,14 +42,15 @@ func getFirstAccountFromNamedNode(fixture *fixtures.RestClientFixture, r *requir
func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round uint64) (uint64, error) {
block, err := fixture.AlgodClient.Block(round)
- require.NoError(t, err)
+ a := require.New(fixtures.SynchronizedTest(t))
+ a.NoError(err)
for {
round++
err := fixture.WaitForRoundWithTimeout(round + 1)
- require.NoError(t, err)
+ a.NoError(err)
nextBlock, err := fixture.AlgodClient.Block(round)
- require.NoError(t, err)
+ a.NoError(err)
if nextBlock.RewardsLevel > block.RewardsLevel {
// reward level increased, rewards were granted
@@ -64,18 +65,19 @@ func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round u
}
func spendToNonParticipating(t *testing.T, fixture *fixtures.RestClientFixture, lastRound uint64, account string, balance uint64, minFee uint64) uint64 {
+ a := require.New(fixtures.SynchronizedTest(t))
// move a lot of Algos to a non participating account -- the incentive pool
poolAddr := basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} // hardcoded; change if the pool address changes
pd := poolAddr
drainTx, err := fixture.LibGoalClient.SendPaymentFromUnencryptedWallet(account, pd.String(), minFee, balance-balance/100-minFee, nil)
- require.NoError(t, err)
+ a.NoError(err)
fixture.WaitForAllTxnsToConfirm(lastRound+uint64(10), map[string]string{drainTx.ID().String(): account})
return balance / 100
}
func TestOnlineOfflineRewards(t *testing.T) {
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "FourNodes.json"))
@@ -137,7 +139,7 @@ func TestPartkeyOnlyRewards(t *testing.T) {
t.Skip()
}
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "FourNodes.json"))
@@ -180,7 +182,7 @@ func TestPartkeyOnlyRewards(t *testing.T) {
func TestRewardUnitThreshold(t *testing.T) {
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "FourNodes.json"))
@@ -299,7 +301,7 @@ var defaultPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0
func TestRewardRateRecalculation(t *testing.T) {
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
// consensusTestRapidRewardRecalculation is a version of ConsensusCurrentVersion
// that decreases the RewardsRateRefreshInterval greatly.
diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
index 961b5489a..813ee3216 100644
--- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
+++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
@@ -34,7 +34,7 @@ func TestBasicPartitionRecovery(t *testing.T) {
t.Skip()
}
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
// Overview of this test:
// Start a two-node network (with 50% each)
@@ -114,7 +114,7 @@ func TestPartitionRecoveryStaggerRestart(t *testing.T) {
}
func runTestWithStaggeredStopStart(t *testing.T, fixture *fixtures.RestClientFixture) {
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
// Get Node1 so we can wait until it has reached the target round
nc1, err := fixture.GetNodeController("Node1")
@@ -159,7 +159,7 @@ func TestBasicPartitionRecoveryPartOffline(t *testing.T) {
t.Skip()
}
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
// Overview of this test:
// Start a three-node network capable of making progress.
@@ -210,7 +210,7 @@ func TestPartitionHalfOffline(t *testing.T) {
t.Skip()
}
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
// Overview of this test:
// Start a TenNodeDistributed network
diff --git a/test/e2e-go/features/teal/compile_test.go b/test/e2e-go/features/teal/compile_test.go
index 8622ce55c..16822febd 100644
--- a/test/e2e-go/features/teal/compile_test.go
+++ b/test/e2e-go/features/teal/compile_test.go
@@ -31,7 +31,7 @@ func TestTealCompile(t *testing.T) {
if testing.Short() {
t.Skip()
}
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.SetupNoStart(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go
index 93fe6caf5..d403ffe20 100644
--- a/test/e2e-go/features/transactions/accountv2_test.go
+++ b/test/e2e-go/features/transactions/accountv2_test.go
@@ -34,7 +34,7 @@ import (
)
func checkEvalDelta(t *testing.T, client *libgoal.Client, startRnd, endRnd uint64, gval uint64, lval uint64) {
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
foundGlobal := false
foundLocal := false
@@ -76,7 +76,7 @@ func checkEvalDelta(t *testing.T, client *libgoal.Client, startRnd, endRnd uint6
func TestAccountInformationV2(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
proto, ok := config.Consensus[protocol.ConsensusFuture]
@@ -162,9 +162,11 @@ int 1
a.NoError(err)
round, err = client.CurrentRound()
a.NoError(err)
- _, err = client.BroadcastTransaction(signedTxn)
+ txid, err := client.BroadcastTransaction(signedTxn)
a.NoError(err)
- client.WaitForRound(round + 2)
+ // ensure transaction is accepted into a block within 5 rounds.
+ confirmed := fixture.WaitForAllTxnsToConfirm(round+5, map[string]string{txid: signedTxn.Txn.Sender.String()})
+ a.True(confirmed)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
@@ -207,7 +209,7 @@ int 1
a.NoError(err)
round, err = client.CurrentRound()
a.NoError(err)
- _, err = client.BroadcastTransaction(signedTxn)
+ txid, err = client.BroadcastTransaction(signedTxn)
a.NoError(err)
_, err = client.WaitForRound(round + 3)
a.NoError(err)
@@ -215,6 +217,9 @@ int 1
resp, err := client.GetPendingTransactions(2)
a.NoError(err)
a.Equal(uint64(0), resp.TotalTxns)
+ txinfo, err := client.TransactionInformation(signedTxn.Txn.Sender.String(), txid)
+ a.NoError(err)
+ a.True(txinfo.ConfirmedRound != 0)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
@@ -257,7 +262,7 @@ int 1
a.Equal(uint64(1), value.Uint)
// 2 global state update in total, 1 local state updates
- checkEvalDelta(t, &client, round, round+5, 2, 1)
+ checkEvalDelta(t, &client, round+2, round+5, 2, 1)
a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index e0fd11ec5..4068bf03f 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -54,7 +54,7 @@ func helperFillSignBroadcast(client libgoal.Client, wh []byte, sender string, tx
func TestAssetValidRounds(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
@@ -188,7 +188,7 @@ func TestAssetConfig(t *testing.T) {
t.Skip()
}
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
@@ -420,7 +420,7 @@ func TestAssetConfig(t *testing.T) {
func TestAssetInformation(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
@@ -512,7 +512,7 @@ func TestAssetInformation(t *testing.T) {
func TestAssetGroupCreateSendDestroy(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
@@ -653,7 +653,7 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) {
func TestAssetSend(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
@@ -1049,7 +1049,7 @@ func setupTestAndNetwork(t *testing.T, networkTemplate string, consensus config.
Assertions *require.Assertions, Fixture *fixtures.RestClientFixture, Client *libgoal.Client, Account0 string) {
t.Parallel()
- asser := require.New(t)
+ asser := require.New(fixtures.SynchronizedTest(t))
if 0 == len(networkTemplate) {
// If the networkTemplate is not specified, used the default one
networkTemplate = "TwoNodes50Each.json"
diff --git a/test/e2e-go/features/transactions/close_account_test.go b/test/e2e-go/features/transactions/close_account_test.go
index 77e134717..66f1a0138 100644
--- a/test/e2e-go/features/transactions/close_account_test.go
+++ b/test/e2e-go/features/transactions/close_account_test.go
@@ -27,7 +27,7 @@ import (
func TestAccountsCanClose(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV15.json"))
diff --git a/test/e2e-go/features/transactions/group_test.go b/test/e2e-go/features/transactions/group_test.go
index 74b71bccf..b13929f2f 100644
--- a/test/e2e-go/features/transactions/group_test.go
+++ b/test/e2e-go/features/transactions/group_test.go
@@ -31,7 +31,7 @@ import (
func TestGroupTransactions(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
@@ -101,7 +101,7 @@ func TestGroupTransactions(t *testing.T) {
func TestGroupTransactionsDifferentSizes(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
@@ -207,7 +207,7 @@ func TestGroupTransactionsDifferentSizes(t *testing.T) {
func TestGroupTransactionsSubmission(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
diff --git a/test/e2e-go/features/transactions/lease_test.go b/test/e2e-go/features/transactions/lease_test.go
index 5c2102cd6..571776f37 100644
--- a/test/e2e-go/features/transactions/lease_test.go
+++ b/test/e2e-go/features/transactions/lease_test.go
@@ -28,7 +28,7 @@ import (
func TestLeaseTransactionsSameSender(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
@@ -87,7 +87,7 @@ func TestLeaseTransactionsSameSender(t *testing.T) {
func TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV22.json"))
@@ -159,7 +159,7 @@ func TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7(t *testing.T) {
func TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
@@ -218,7 +218,7 @@ func TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7(t *testing.T) {
func TestLeaseTransactionsSameSenderDifferentLease(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
@@ -279,7 +279,7 @@ func TestLeaseTransactionsSameSenderDifferentLease(t *testing.T) {
func TestLeaseTransactionsDifferentSender(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
@@ -353,7 +353,7 @@ func TestLeaseTransactionsDifferentSender(t *testing.T) {
func TestOverlappingLeases(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index 8e4b5de6c..c56c75f18 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -40,7 +40,7 @@ func TestAccountsCanChangeOnlineStateInTheFuture(t *testing.T) {
func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, templatePath)
diff --git a/test/e2e-go/features/transactions/proof_test.go b/test/e2e-go/features/transactions/proof_test.go
index 3409191df..c604c2b2b 100644
--- a/test/e2e-go/features/transactions/proof_test.go
+++ b/test/e2e-go/features/transactions/proof_test.go
@@ -30,7 +30,7 @@ import (
func TestTxnMerkleProof(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index a19ace676..b91fe8b84 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -49,7 +49,7 @@ func TestAccountsCanSendMoney(t *testing.T) {
func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends int) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, templatePath)
diff --git a/test/e2e-go/features/transactions/transactionPool_test.go b/test/e2e-go/features/transactions/transactionPool_test.go
index d07d8a1c5..a6d235b18 100644
--- a/test/e2e-go/features/transactions/transactionPool_test.go
+++ b/test/e2e-go/features/transactions/transactionPool_test.go
@@ -29,7 +29,7 @@ import (
func TestTransactionPoolOrderingAndClearing(t *testing.T) {
t.Skip("test is flaky as of 2019-06-18")
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachOneOnline.json"))
@@ -46,7 +46,7 @@ func TestTransactionPoolOrderingAndClearing(t *testing.T) {
stoppedRound := curStatus.LastRound
minTxnFee, minAcctBalance, err := fixture.MinFeeAndBalance(curStatus.LastRound)
- require.NoError(t, err)
+ r.NoError(err)
// put transactions in the pool - they cannot be removed from the pool while the node is stopped
numTransactions := 25
@@ -115,7 +115,7 @@ func TestTransactionPoolExponentialFees(t *testing.T) {
t.Skip("new FIFO pool does not have exponential fee txn replacement")
t.Parallel()
- r := require.New(t)
+ r := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
diff --git a/test/e2e-go/kmd/e2e_kmd_server_client_test.go b/test/e2e-go/kmd/e2e_kmd_server_client_test.go
index b08219f8a..cb7c13c25 100644
--- a/test/e2e-go/kmd/e2e_kmd_server_client_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_server_client_test.go
@@ -28,6 +28,7 @@ import (
)
func TestServerStartsStopsSuccessfully(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Setup(t)
@@ -37,11 +38,12 @@ func TestServerStartsStopsSuccessfully(t *testing.T) {
req := kmdapi.VersionsRequest{}
resp := kmdapi.VersionsResponse{}
err := f.Client.DoV1Request(req, &resp)
- require.NoError(t, err)
+ a.NoError(err)
}
func TestBadAuthFails(t *testing.T) {
t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
var f fixtures.KMDFixture
f.Setup(t)
defer f.Shutdown()
@@ -49,16 +51,17 @@ func TestBadAuthFails(t *testing.T) {
// Make a client with a bad token
badAPIToken := strings.Repeat("x", 64)
client, err := client.MakeKMDClient(f.Sock, badAPIToken)
- require.NoError(t, err)
+ a.NoError(err)
// Test that `GET /v1/wallets` fails with the bad token
req := kmdapi.APIV1GETWalletsRequest{}
resp := kmdapi.APIV1GETWalletsResponse{}
err = client.DoV1Request(req, &resp)
- require.Error(t, err)
+ a.Error(err)
}
func TestGoodAuthSucceeds(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Setup(t)
@@ -69,5 +72,5 @@ func TestGoodAuthSucceeds(t *testing.T) {
req := kmdapi.APIV1GETWalletsRequest{}
resp := kmdapi.APIV1GETWalletsResponse{}
err := f.Client.DoV1Request(req, &resp)
- require.NoError(t, err)
+ a.NoError(err)
}
diff --git a/test/e2e-go/kmd/e2e_kmd_sqlite_test.go b/test/e2e-go/kmd/e2e_kmd_sqlite_test.go
index 45cee971e..63dda845d 100644
--- a/test/e2e-go/kmd/e2e_kmd_sqlite_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_sqlite_test.go
@@ -26,6 +26,7 @@ import (
)
func TestNonAbsSQLiteWalletConfigFails(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Initialize(t)
@@ -35,12 +36,13 @@ func TestNonAbsSQLiteWalletConfigFails(t *testing.T) {
cfg := `{"drivers":{"sqlite":{"wallets_dir":"not/absolute"}}}`
err := f.TestConfig([]byte(cfg))
// Should return an error
- require.NotNil(t, err)
+ a.NotNil(err)
// Should return the correct error
- require.Equal(t, err, config.ErrSQLiteWalletNotAbsolute)
+ a.Equal(err, config.ErrSQLiteWalletNotAbsolute)
}
func TestAbsSQLiteWalletConfigSucceeds(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Initialize(t)
@@ -50,5 +52,5 @@ func TestAbsSQLiteWalletConfigSucceeds(t *testing.T) {
cfg := `{"drivers":{"sqlite":{"wallets_dir":"/very/absolute"}}}`
err := f.TestConfig([]byte(cfg))
// Error should be nil
- require.Nil(t, err)
+ a.Nil(err)
}
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
index f10388a44..b119f716f 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
@@ -32,6 +32,7 @@ import (
)
func TestGenerateAndListKeys(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -43,10 +44,10 @@ func TestGenerateAndListKeys(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
- require.NotEmpty(t, resp0.Address)
+ a.NotEmpty(resp0.Address)
// List public keys
req1 := kmdapi.APIV1POSTKeyListRequest{
@@ -54,13 +55,13 @@ func TestGenerateAndListKeys(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTKeyListResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// List should have exactly one entry
- require.Equal(t, len(resp1.Addresses), 1)
+ a.Equal(len(resp1.Addresses), 1)
// Only entry should equal generated public key
- require.Equal(t, resp1.Addresses[0], resp0.Address)
+ a.Equal(resp1.Addresses[0], resp0.Address)
// Generate another key
req2 := kmdapi.APIV1POSTKeyRequest{
@@ -68,7 +69,7 @@ func TestGenerateAndListKeys(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// List public keys
req3 := kmdapi.APIV1POSTKeyListRequest{
@@ -76,13 +77,14 @@ func TestGenerateAndListKeys(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTKeyListResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// List should have exactly two entries
- require.Equal(t, len(resp3.Addresses), 2)
+ a.Equal(len(resp3.Addresses), 2)
}
func TestImportKey(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -100,10 +102,10 @@ func TestImportKey(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyImportResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Public key should be that of the key we imported
- require.Equal(t, resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress())
+ a.Equal(resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress())
// Try to import the same key
req1 := kmdapi.APIV1POSTKeyImportRequest{
@@ -114,7 +116,7 @@ func TestImportKey(t *testing.T) {
err = f.Client.DoV1Request(req1, &resp1)
// Should fail (duplicate key)
- require.Error(t, err)
+ a.Error(err)
// List public keys
req2 := kmdapi.APIV1POSTKeyListRequest{
@@ -122,16 +124,17 @@ func TestImportKey(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTKeyListResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// List should have exactly one entry
- require.Equal(t, len(resp2.Addresses), 1)
+ a.Equal(len(resp2.Addresses), 1)
// Only entry should equal generated public key
- require.Equal(t, resp2.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress())
+ a.Equal(resp2.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress())
}
func TestExportKey(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -149,10 +152,10 @@ func TestExportKey(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyImportResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Public key should be that of the key we imported
- require.Equal(t, resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress())
+ a.Equal(resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress())
// List public keys
req1 := kmdapi.APIV1POSTKeyListRequest{
@@ -160,13 +163,13 @@ func TestExportKey(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTKeyListResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// List should have exactly one entry
- require.Equal(t, len(resp1.Addresses), 1)
+ a.Equal(len(resp1.Addresses), 1)
// Only entry should equal generated public key
- require.Equal(t, resp1.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress())
+ a.Equal(resp1.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress())
// Export the key
req2 := kmdapi.APIV1POSTKeyExportRequest{
@@ -176,10 +179,10 @@ func TestExportKey(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTKeyExportResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// Response should be same secret key
- require.Equal(t, resp2.PrivateKey, crypto.PrivateKey(secrets.SK))
+ a.Equal(resp2.PrivateKey, crypto.PrivateKey(secrets.SK))
// Export with wrong password should fail
req3 := kmdapi.APIV1POSTKeyExportRequest{
@@ -189,10 +192,11 @@ func TestExportKey(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTKeyExportResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.Error(t, err)
+ a.Error(err)
}
func TestDeleteKey(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -204,10 +208,10 @@ func TestDeleteKey(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Token should not be empty
- require.NotEqual(t, resp0.Address, crypto.Digest{})
+ a.NotEqual(resp0.Address, crypto.Digest{})
// List public keys
req1 := kmdapi.APIV1POSTKeyListRequest{
@@ -215,13 +219,13 @@ func TestDeleteKey(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTKeyListResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// List should have exactly one entry
- require.Equal(t, len(resp1.Addresses), 1)
+ a.Equal(len(resp1.Addresses), 1)
// Only entry should equal generated public key
- require.Equal(t, resp1.Addresses[0], resp0.Address)
+ a.Equal(resp1.Addresses[0], resp0.Address)
// Delete with wrong password should fail
req2 := kmdapi.APIV1DELETEKeyRequest{
@@ -231,7 +235,7 @@ func TestDeleteKey(t *testing.T) {
}
resp2 := kmdapi.APIV1DELETEKeyResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.Error(t, err)
+ a.Error(err)
// Try to delete the key
req3 := kmdapi.APIV1DELETEKeyRequest{
@@ -241,7 +245,7 @@ func TestDeleteKey(t *testing.T) {
}
resp3 := kmdapi.APIV1DELETEKeyResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// List public keys
req4 := kmdapi.APIV1POSTKeyListRequest{
@@ -249,13 +253,14 @@ func TestDeleteKey(t *testing.T) {
}
resp4 := kmdapi.APIV1POSTKeyListResponse{}
err = f.Client.DoV1Request(req4, &resp4)
- require.NoError(t, err)
+ a.NoError(err)
// List should have exactly zero entries
- require.Equal(t, len(resp4.Addresses), 0)
+ a.Equal(len(resp4.Addresses), 0)
}
func TestSignTransaction(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -274,7 +279,7 @@ func TestSignTransaction(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyImportResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Make a transaction
tx := transactions.Transaction{
@@ -300,19 +305,20 @@ func TestSignTransaction(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTTransactionSignResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// SignedTxn signature should not be empty
var stx transactions.SignedTxn
err = protocol.Decode(resp1.SignedTransaction, &stx)
- require.NoError(t, err)
- require.NotEqual(t, stx.Sig, crypto.Signature{})
+ a.NoError(err)
+ a.NotEqual(stx.Sig, crypto.Signature{})
// TODO The SignedTxn should actually verify
- // require.NoError(t, stx.Verify())
+ // a.NoError(stx.Verify())
}
func TestSignProgram(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -331,7 +337,7 @@ func TestSignProgram(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyImportResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
program := []byte("blah blah blah, not a real program, just some bytes to sign, kmd does not have a program interpreter to know if the program is legitimate, but it _does_ prefix the program with protocol.Program and we can verify that here below")
@@ -346,19 +352,20 @@ func TestSignProgram(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTProgramSignResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// SignedTxn signature should not be empty
- require.NotEmpty(t, len(resp1.Signature), 0)
+ a.NotEmpty(len(resp1.Signature), 0)
var sig crypto.Signature
copy(sig[:], resp1.Signature)
- require.NotEqual(t, sig, crypto.Signature{})
+ a.NotEqual(sig, crypto.Signature{})
ph := logic.Program(program)
- require.True(t, secrets.SignatureVerifier.Verify(ph, sig))
+ a.True(secrets.SignatureVerifier.Verify(ph, sig))
}
func BenchmarkSignTransaction(b *testing.B) {
+ a := require.New(fixtures.SynchronizedTest(b))
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(b)
defer f.Shutdown()
@@ -376,7 +383,7 @@ func BenchmarkSignTransaction(b *testing.B) {
}
resp0 := kmdapi.APIV1POSTKeyImportResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(b, err)
+ a.NoError(err)
// Make a transaction
tx := transactions.Transaction{
@@ -404,12 +411,13 @@ func BenchmarkSignTransaction(b *testing.B) {
}
resp1 := kmdapi.APIV1POSTTransactionSignResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(b, err)
+ a.NoError(err)
}
})
}
func TestMasterKeyImportExport(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -421,11 +429,11 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
key0 := resp0.Address
- require.NotEqual(t, key0, crypto.Digest{})
+ a.NotEqual(key0, crypto.Digest{})
// Generate another key
req1 := kmdapi.APIV1POSTKeyRequest{
@@ -433,11 +441,11 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
key1 := resp1.Address
- require.NotEqual(t, key1, crypto.Digest{})
+ a.NotEqual(key1, crypto.Digest{})
// Export master key with incorrect password should fail
req2 := kmdapi.APIV1POSTMasterKeyExportRequest{
@@ -446,7 +454,7 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTMasterKeyExportResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.Error(t, err)
+ a.Error(err)
// Export master key with correct password should succeed
req3 := kmdapi.APIV1POSTMasterKeyExportRequest{
@@ -455,11 +463,11 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTMasterKeyExportResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// MDK should not be blank
mdk0 := resp3.MasterDerivationKey
- require.NotEqual(t, mdk0, crypto.MasterDerivationKey{})
+ a.NotEqual(mdk0, crypto.MasterDerivationKey{})
// Create another wallet, don't import the MDK
pw := "unrelated-password"
@@ -470,11 +478,11 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp4 := kmdapi.APIV1POSTWalletResponse{}
err = f.Client.DoV1Request(req4, &resp4)
- require.NoError(t, err)
+ a.NoError(err)
// Get the new wallet ID
unrelatedWalletID := resp4.Wallet.ID
- require.NotEmpty(t, unrelatedWalletID)
+ a.NotEmpty(unrelatedWalletID)
// Get a wallet token
req5 := kmdapi.APIV1POSTWalletInitRequest{
@@ -483,7 +491,7 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp5 := kmdapi.APIV1POSTWalletInitResponse{}
err = f.Client.DoV1Request(req5, &resp5)
- require.NoError(t, err)
+ a.NoError(err)
// Generate a key for the unrelated wallet
req6 := kmdapi.APIV1POSTKeyRequest{
@@ -491,15 +499,15 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp6 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req6, &resp6)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
key2 := resp6.Address
- require.NotEqual(t, key2, crypto.Digest{})
+ a.NotEqual(key2, crypto.Digest{})
// Key should not be equal to either of the keys from the first wallet
- require.NotEqual(t, key2, key0)
- require.NotEqual(t, key2, key1)
+ a.NotEqual(key2, key0)
+ a.NotEqual(key2, key1)
// Create another wallet, import the MDK
pw = "related-password"
@@ -511,11 +519,11 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp7 := kmdapi.APIV1POSTWalletResponse{}
err = f.Client.DoV1Request(req7, &resp7)
- require.NoError(t, err)
+ a.NoError(err)
// Get the new wallet ID
relatedWalletID := resp7.Wallet.ID
- require.NotEmpty(t, relatedWalletID)
+ a.NotEmpty(relatedWalletID)
// Get a wallet token
req8 := kmdapi.APIV1POSTWalletInitRequest{
@@ -524,7 +532,7 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp8 := kmdapi.APIV1POSTWalletInitResponse{}
err = f.Client.DoV1Request(req8, &resp8)
- require.NoError(t, err)
+ a.NoError(err)
relatedWalletHandleToken := resp8.WalletHandleToken
@@ -534,11 +542,11 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp9 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req9, &resp9)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
key3 := resp9.Address
- require.NotEqual(t, key3, crypto.Digest{})
+ a.NotEqual(key3, crypto.Digest{})
// Generate another key
req10 := kmdapi.APIV1POSTKeyRequest{
@@ -546,17 +554,17 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp10 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req10, &resp10)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
key4 := resp1.Address
- require.NotEqual(t, key4, crypto.Digest{})
+ a.NotEqual(key4, crypto.Digest{})
// key3 should be the same as key0
- require.Equal(t, key3, key0)
+ a.Equal(key3, key0)
// key4 should be the same as key1
- require.Equal(t, key4, key1)
+ a.Equal(key4, key1)
// Export master key for related wallet
req11 := kmdapi.APIV1POSTMasterKeyExportRequest{
@@ -565,17 +573,18 @@ func TestMasterKeyImportExport(t *testing.T) {
}
resp11 := kmdapi.APIV1POSTMasterKeyExportResponse{}
err = f.Client.DoV1Request(req11, &resp11)
- require.NoError(t, err)
+ a.NoError(err)
// MDK should not be blank
mdk1 := resp11.MasterDerivationKey
- require.NotEqual(t, mdk1, crypto.MasterDerivationKey{})
+ a.NotEqual(mdk1, crypto.MasterDerivationKey{})
// MDK should be the same as the first mdk
- require.Equal(t, mdk0, mdk1)
+ a.Equal(mdk0, mdk1)
}
func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -589,11 +598,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp := kmdapi.APIV1POSTKeyResponse{}
err := f.Client.DoV1Request(req, &resp)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
addr := resp.Address
- require.NotEmpty(t, addr)
+ a.NotEmpty(addr)
addrs = append(addrs, addr)
}
@@ -604,11 +613,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTMasterKeyExportResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// MDK should not be blank
mdk := resp0.MasterDerivationKey
- require.NotEqual(t, mdk, crypto.MasterDerivationKey{})
+ a.NotEqual(mdk, crypto.MasterDerivationKey{})
// Create another wallet, import the MDK
pw := "related-password"
@@ -620,11 +629,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTWalletResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Get the new wallet ID
relatedWalletID := resp1.Wallet.ID
- require.NotEmpty(t, relatedWalletID)
+ a.NotEmpty(relatedWalletID)
// Get a wallet token
req2 := kmdapi.APIV1POSTWalletInitRequest{
@@ -633,7 +642,7 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTWalletInitResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
relatedWalletHandleToken := resp2.WalletHandleToken
@@ -643,14 +652,14 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// Key should not be empty
addr0 := resp3.Address
- require.NotEmpty(t, addr0)
+ a.NotEmpty(addr0)
// key0 should be the same as keys[0]
- require.Equal(t, addr0, addrs[0])
+ a.Equal(addr0, addrs[0])
// Export keys[1]'s secret key from the first wallet
req4 := kmdapi.APIV1POSTKeyExportRequest{
@@ -660,11 +669,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp4 := kmdapi.APIV1POSTKeyExportResponse{}
err = f.Client.DoV1Request(req4, &resp4)
- require.NoError(t, err)
+ a.NoError(err)
// Exported secret should not be blank
key1Secret := resp4.PrivateKey
- require.NotEqual(t, key1Secret, crypto.PrivateKey{})
+ a.NotEqual(key1Secret, crypto.PrivateKey{})
// Import keys[1] into the second wallet
req5 := kmdapi.APIV1POSTKeyImportRequest{
@@ -673,10 +682,10 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp5 := kmdapi.APIV1POSTKeyImportResponse{}
err = f.Client.DoV1Request(req5, &resp5)
- require.NoError(t, err)
+ a.NoError(err)
// Address should be addrs[1]
- require.Equal(t, resp5.Address, addrs[1])
+ a.Equal(resp5.Address, addrs[1])
// Generate another key in the second wallet
req6 := kmdapi.APIV1POSTKeyRequest{
@@ -684,12 +693,12 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) {
}
resp6 := kmdapi.APIV1POSTKeyResponse{}
err = f.Client.DoV1Request(req6, &resp6)
- require.NoError(t, err)
+ a.NoError(err)
// Address should not be empty
addr1 := resp6.Address
- require.NotEmpty(t, addr1)
+ a.NotEmpty(addr1)
// Address should be equal to addrs[2]
- require.Equal(t, addr1, addrs[2])
+ a.Equal(addr1, addrs[2])
}
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
index e8fa47930..3a5576a6e 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
@@ -32,12 +32,14 @@ import (
)
func addrToPK(t *testing.T, addr string) crypto.PublicKey {
+ req := require.New(fixtures.SynchronizedTest(t))
a, err := basics.UnmarshalChecksumAddress(addr)
- require.NoError(t, err)
+ req.NoError(err)
return crypto.PublicKey(a)
}
func TestMultisigImportList(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -49,12 +51,12 @@ func TestMultisigImportList(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
addr0 := resp0.Address
pk0 := addrToPK(t, addr0)
err = f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
addr1 := resp0.Address
pk1 := addrToPK(t, addr1)
@@ -67,7 +69,7 @@ func TestMultisigImportList(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTMultisigImportResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
addr := resp1.Address
// List multisig addresses and make sure it's there
@@ -76,14 +78,15 @@ func TestMultisigImportList(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTMultisigListResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// Make sure the imported multisig address is there
- require.Equal(t, len(resp2.Addresses), 1)
- require.Equal(t, resp2.Addresses[0], addr)
+ a.Equal(len(resp2.Addresses), 1)
+ a.Equal(resp2.Addresses[0], addr)
}
func TestMultisigExportDelete(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -95,12 +98,12 @@ func TestMultisigExportDelete(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTKeyResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
addr0 := resp0.Address
pk0 := addrToPK(t, addr0)
err = f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
addr1 := resp0.Address
pk1 := addrToPK(t, addr1)
@@ -113,7 +116,7 @@ func TestMultisigExportDelete(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTMultisigImportResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
addr := resp1.Address
// Export the multisig preimage
@@ -123,12 +126,12 @@ func TestMultisigExportDelete(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTMultisigExportResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// Make sure the exported preimage is correct
- require.Equal(t, req1.Version, resp2.Version)
- require.Equal(t, req1.Threshold, resp2.Threshold)
- require.Equal(t, req1.PKs, resp2.PKs)
+ a.Equal(req1.Version, resp2.Version)
+ a.Equal(req1.Threshold, resp2.Threshold)
+ a.Equal(req1.PKs, resp2.PKs)
// Delete the multisig preimage
req3 := kmdapi.APIV1DELETEMultisigRequest{
@@ -138,7 +141,7 @@ func TestMultisigExportDelete(t *testing.T) {
}
resp3 := kmdapi.APIV1DELETEMultisigResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// List multisig addresses and make sure it's empty
req4 := kmdapi.APIV1POSTMultisigListRequest{
@@ -146,30 +149,31 @@ func TestMultisigExportDelete(t *testing.T) {
}
resp4 := kmdapi.APIV1POSTMultisigListResponse{}
err = f.Client.DoV1Request(req4, &resp4)
- require.NoError(t, err)
+ a.NoError(err)
// Make sure the imported multisig address is gone
- require.Equal(t, len(resp4.Addresses), 0)
+ a.Equal(len(resp4.Addresses), 0)
}
func TestMultisigSign(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
defer f.Shutdown()
resp, err := f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk1 := addrToPK(t, resp.Address)
resp, err = f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk2 := addrToPK(t, resp.Address)
pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported
// Create a 2-of-3 multisig account from the three public keys
resp1, err := f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3})
- require.NoError(t, err)
+ a.NoError(err)
msigAddr := addrToPK(t, resp1.Address)
// Make a transaction spending from the multisig address
@@ -197,11 +201,11 @@ func TestMultisigSign(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTMultisigTransactionSignResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
var msig crypto.MultisigSig
err = protocol.Decode(resp2.Multisig, &msig)
- require.NoError(t, err)
+ a.NoError(err)
// Try to add another signature
req3 := kmdapi.APIV1POSTMultisigTransactionSignRequest{
@@ -213,39 +217,40 @@ func TestMultisigSign(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTMultisigTransactionSignResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// Assemble them into a signed transaction and see if it verifies
_, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, msig)
- require.NoError(t, err)
+ a.NoError(err)
// TODO See if the signature verifies
// err = stxn.Verify()
- // require.NoError(t, err)
+ // a.NoError(err)
}
func TestMultisigSignWithSigner(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
defer f.Shutdown()
resp, err := f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk1 := addrToPK(t, resp.Address)
resp, err = f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk2 := addrToPK(t, resp.Address)
pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported
sender, err := f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pkSender := addrToPK(t, sender.Address)
// Create a 2-of-3 multisig account from the three public keys
resp1, err := f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3})
- require.NoError(t, err)
+ a.NoError(err)
msigAddr := addrToPK(t, resp1.Address)
// Make a transaction spending from the multisig address
@@ -278,11 +283,11 @@ func TestMultisigSignWithSigner(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTMultisigTransactionSignResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
var msig crypto.MultisigSig
err = protocol.Decode(resp2.Multisig, &msig)
- require.NoError(t, err)
+ a.NoError(err)
// Try to add another signature
req3 := kmdapi.APIV1POSTMultisigTransactionSignRequest{
@@ -295,35 +300,36 @@ func TestMultisigSignWithSigner(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTMultisigTransactionSignResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
// Assemble them into a signed transaction and see if it verifies
_, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, msig)
- require.NoError(t, err)
+ a.NoError(err)
}
func TestMultisigSignWithWrongSigner(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
defer f.Shutdown()
resp, err := f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk1 := addrToPK(t, resp.Address)
resp, err = f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk2 := addrToPK(t, resp.Address)
pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported
sender, err := f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pkSender := addrToPK(t, sender.Address)
// Create a 2-of-3 multisig account from the three public keys
_, err = f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3})
- require.NoError(t, err)
+ a.NoError(err)
// Make a transaction spending from the multisig address
tx := transactions.Transaction{
@@ -355,28 +361,29 @@ func TestMultisigSignWithWrongSigner(t *testing.T) {
resp2 := kmdapi.APIV1POSTMultisigTransactionSignResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.Error(t, err)
+ a.Error(err)
}
func TestMultisigSignProgram(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
defer f.Shutdown()
resp, err := f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk1 := addrToPK(t, resp.Address)
resp, err = f.Client.GenerateKey([]byte(walletHandleToken))
- require.NoError(t, err)
+ a.NoError(err)
pk2 := addrToPK(t, resp.Address)
pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported
// Create a 2-of-3 multisig account from the three public keys
resp1, err := f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3})
- require.NoError(t, err)
+ a.NoError(err)
msigAddr := addrToPK(t, resp1.Address)
program := []byte("blah blah blah, not a real program, just some bytes to sign, kmd does not have a program interpreter to know if the program is legitimate, but it _does_ prefix the program with protocol.Program and we can verify that here below")
@@ -392,11 +399,11 @@ func TestMultisigSignProgram(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTMultisigProgramSignResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
var msig crypto.MultisigSig
err = protocol.Decode(resp2.Multisig, &msig)
- require.NoError(t, err)
+ a.NoError(err)
// Try to add another signature
req3 := kmdapi.APIV1POSTMultisigProgramSignRequest{
@@ -409,12 +416,12 @@ func TestMultisigSignProgram(t *testing.T) {
}
resp3 := kmdapi.APIV1POSTMultisigProgramSignResponse{}
err = f.Client.DoV1Request(req3, &resp3)
- require.NoError(t, err)
+ a.NoError(err)
err = protocol.Decode(resp3.Multisig, &msig)
- require.NoError(t, err)
+ a.NoError(err)
ok, err := crypto.MultisigVerify(logic.Program(program), crypto.Digest(msigAddr), msig)
- require.NoError(t, err)
- require.True(t, ok)
+ a.NoError(err)
+ a.True(ok)
}
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_test.go
index d263dba90..e0c7ccd9b 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_test.go
@@ -27,6 +27,7 @@ import (
)
func TestWalletCreation(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Setup(t)
@@ -36,10 +37,10 @@ func TestWalletCreation(t *testing.T) {
req0 := kmdapi.APIV1GETWalletsRequest{}
resp0 := kmdapi.APIV1GETWalletsResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Shouldn't be any wallets yet
- require.Equal(t, len(resp0.Wallets), 0)
+ a.Equal(len(resp0.Wallets), 0)
// Create a wallet
walletName := "default"
@@ -51,16 +52,16 @@ func TestWalletCreation(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTWalletResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Test that `GET /v1/wallets` returns the new wallet
req2 := kmdapi.APIV1GETWalletsRequest{}
resp2 := kmdapi.APIV1GETWalletsResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// Should be one wallet
- require.Equal(t, len(resp2.Wallets), 1)
+ a.Equal(len(resp2.Wallets), 1)
// Try to create a wallet with the same name
req3 := kmdapi.APIV1POSTWalletRequest{
@@ -72,10 +73,11 @@ func TestWalletCreation(t *testing.T) {
err = f.Client.DoV1Request(req3, &resp3)
// Should be an error
- require.Error(t, err)
+ a.Error(err)
}
func TestBlankWalletCreation(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Setup(t)
@@ -89,25 +91,26 @@ func TestBlankWalletCreation(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTWalletResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Test that `GET /v1/wallets` returns the new wallet
req1 := kmdapi.APIV1GETWalletsRequest{}
resp1 := kmdapi.APIV1GETWalletsResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Should be one wallet
- require.Equal(t, len(resp1.Wallets), 1)
+ a.Equal(len(resp1.Wallets), 1)
// Name should not be blank
- require.NotEmpty(t, resp1.Wallets[0].Name)
+ a.NotEmpty(resp1.Wallets[0].Name)
// Name should be equal to ID
- require.Equal(t, resp1.Wallets[0].Name, resp1.Wallets[0].ID)
+ a.Equal(resp1.Wallets[0].Name, resp1.Wallets[0].ID)
}
func TestWalletRename(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
f.Setup(t)
@@ -123,19 +126,19 @@ func TestWalletRename(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTWalletResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Test that `GET /v1/wallets` returns the new wallet
req1 := kmdapi.APIV1GETWalletsRequest{}
resp1 := kmdapi.APIV1GETWalletsResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Should be one wallet
- require.Equal(t, len(resp1.Wallets), 1)
+ a.Equal(len(resp1.Wallets), 1)
// Name should be correct
- require.Equal(t, resp1.Wallets[0].Name, walletName)
+ a.Equal(resp1.Wallets[0].Name, walletName)
// Try to rename the wallet with the wrong password
newWalletName := "newWallet4u"
@@ -148,7 +151,7 @@ func TestWalletRename(t *testing.T) {
err = f.Client.DoV1Request(req2, &resp2)
// Should be an error
- require.Error(t, err)
+ a.Error(err)
// Try to rename the wallet with the correct password
req3 := kmdapi.APIV1POSTWalletRenameRequest{
@@ -160,31 +163,32 @@ func TestWalletRename(t *testing.T) {
err = f.Client.DoV1Request(req3, &resp3)
// Should succeed
- require.NoError(t, err)
+ a.NoError(err)
// Returned wallet should have the new name
- require.Equal(t, newWalletName, resp3.Wallet.Name)
+ a.Equal(newWalletName, resp3.Wallet.Name)
// Returned wallet should have the correct ID
- require.Equal(t, resp1.Wallets[0].ID, resp3.Wallet.ID)
+ a.Equal(resp1.Wallets[0].ID, resp3.Wallet.ID)
// Test that `GET /v1/wallets` returns the new wallet
req4 := kmdapi.APIV1GETWalletsRequest{}
resp4 := kmdapi.APIV1GETWalletsResponse{}
err = f.Client.DoV1Request(req4, &resp4)
- require.NoError(t, err)
+ a.NoError(err)
// Should be one wallet
- require.Equal(t, len(resp4.Wallets), 1)
+ a.Equal(len(resp4.Wallets), 1)
// Returned wallet should have the new name
- require.Equal(t, newWalletName, resp4.Wallets[0].Name)
+ a.Equal(newWalletName, resp4.Wallets[0].Name)
// Returned wallet should have the correct ID
- require.Equal(t, resp1.Wallets[0].ID, resp4.Wallets[0].ID)
+ a.Equal(resp1.Wallets[0].ID, resp4.Wallets[0].ID)
}
func TestWalletSessionRelease(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -196,10 +200,10 @@ func TestWalletSessionRelease(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTWalletInfoResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Should return the wallet we created
- require.Equal(t, resp0.WalletHandle.Wallet.Name, f.WalletName)
+ a.Equal(resp0.WalletHandle.Wallet.Name, f.WalletName)
// Test that `POST /v1/wallet/release` succeeds
req1 := kmdapi.APIV1POSTWalletReleaseRequest{
@@ -207,7 +211,7 @@ func TestWalletSessionRelease(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTWalletReleaseResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Test that `POST /v1/wallet/info` no longer works with this token
req2 := kmdapi.APIV1POSTWalletInfoRequest{
@@ -217,13 +221,14 @@ func TestWalletSessionRelease(t *testing.T) {
err = f.Client.DoV1Request(req2, &resp2)
// Error response
- require.Error(t, err)
+ a.Error(err)
// Should not return the wallet we created
- require.NotEqual(t, resp2.WalletHandle.Wallet.Name, f.WalletName)
+ a.NotEqual(resp2.WalletHandle.Wallet.Name, f.WalletName)
}
func TestWalletSessionRenew(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
walletHandleToken := f.SetupWithWallet(t)
@@ -235,7 +240,7 @@ func TestWalletSessionRenew(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTWalletInfoResponse{}
err := f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Note # seconds until expiration
expiresSecsInitial := resp0.WalletHandle.ExpiresSeconds
@@ -249,11 +254,11 @@ func TestWalletSessionRenew(t *testing.T) {
}
resp1 := kmdapi.APIV1POSTWalletInfoResponse{}
err = f.Client.DoV1Request(req1, &resp1)
- require.NoError(t, err)
+ a.NoError(err)
// Should have decreased
expiresSecsLater := resp1.WalletHandle.ExpiresSeconds
- require.True(t, expiresSecsLater < expiresSecsInitial)
+ a.True(expiresSecsLater < expiresSecsInitial)
// Renew the handle
req2 := kmdapi.APIV1POSTWalletRenewRequest{
@@ -261,14 +266,15 @@ func TestWalletSessionRenew(t *testing.T) {
}
resp2 := kmdapi.APIV1POSTWalletRenewResponse{}
err = f.Client.DoV1Request(req2, &resp2)
- require.NoError(t, err)
+ a.NoError(err)
// Should have increased
expiresSecsRenewed := resp2.WalletHandle.ExpiresSeconds
- require.True(t, expiresSecsRenewed > expiresSecsLater)
+ a.True(expiresSecsRenewed > expiresSecsLater)
}
func TestWalletSessionExpiry(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
t.Parallel()
var f fixtures.KMDFixture
// Write a config for 1 second session expirations
@@ -276,7 +282,7 @@ func TestWalletSessionExpiry(t *testing.T) {
f.SetupWithConfig(t, cfg)
walletHandleToken, err := f.MakeWalletAndHandleToken()
defer f.Shutdown()
- require.NoError(t, err)
+ a.NoError(err)
// Get deets about this wallet token to confirm the token works
req0 := kmdapi.APIV1POSTWalletInfoRequest{
@@ -284,7 +290,7 @@ func TestWalletSessionExpiry(t *testing.T) {
}
resp0 := kmdapi.APIV1POSTWalletInfoResponse{}
err = f.Client.DoV1Request(req0, &resp0)
- require.NoError(t, err)
+ a.NoError(err)
// Wait for token to expire
time.Sleep(2 * time.Second)
@@ -297,5 +303,5 @@ func TestWalletSessionExpiry(t *testing.T) {
err = f.Client.DoV1Request(req1, &resp1)
// Token should have expired
- require.Error(t, err)
+ a.Error(err)
}
diff --git a/test/e2e-go/perf/basic_test.go b/test/e2e-go/perf/basic_test.go
index 085072ede..d9748e5cd 100644
--- a/test/e2e-go/perf/basic_test.go
+++ b/test/e2e-go/perf/basic_test.go
@@ -59,6 +59,7 @@ func queuePayments(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, q <-chan
}
func signer(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, wh []byte, txnChan <-chan *transactions.Transaction, sigTxnChan chan<- *transactions.SignedTxn) {
+ a := require.New(fixtures.SynchronizedTest(b))
for {
txn := <-txnChan
if txn == nil {
@@ -69,7 +70,7 @@ func signer(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, wh []byte, txnCh
if err != nil {
fmt.Printf("Error signing: %v\n", err)
}
- require.NoError(b, err)
+ a.NoError(err)
sigTxnChan <- &stxn
}
@@ -84,6 +85,7 @@ func BenchmarkPaymentsThroughput(b *testing.B) {
}
func doBenchTemplate(b *testing.B, template string, moneynode string) {
+ a := require.New(fixtures.SynchronizedTest(b))
fmt.Printf("Starting to benchmark template %s\n", template)
// consensusTestBigBlocks is a version of ConsensusV0 used for testing
@@ -106,15 +108,15 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
c := fixture.GetLibGoalClientForNamedNode(moneynode)
wallet, err := c.GetUnencryptedWalletHandle()
- require.NoError(b, err)
+ a.NoError(err)
addrs, err := c.ListAddresses(wallet)
- require.NoError(b, err)
- require.True(b, len(addrs) > 0)
+ a.NoError(err)
+ a.True(len(addrs) > 0)
addr := addrs[0]
suggest, err := c.SuggestedParams()
- require.NoError(b, err)
+ a.NoError(err)
var genesisHash crypto.Digest
copy(genesisHash[:], suggest.GenesisHash)
@@ -133,7 +135,7 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
fmt.Printf("Pre-signing %d transactions..\n", numTransactions)
wh, err := c.GetUnencryptedWalletHandle()
- require.NoError(b, err)
+ a.NoError(err)
var sigWg sync.WaitGroup
txnChan := make(chan *transactions.Transaction, 100)
@@ -145,13 +147,13 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
go func() {
sender, err := basics.UnmarshalChecksumAddress(addr)
- require.NoError(b, err)
+ a.NoError(err)
round, err := c.CurrentRound()
- require.NoError(b, err)
+ a.NoError(err)
params, err := c.SuggestedParams()
- require.NoError(b, err)
+ a.NoError(err)
proto := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
for txi := 0; txi < numTransactions; txi++ {
@@ -192,11 +194,11 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
}
status, err = c.Status()
- require.NoError(b, err)
+ a.NoError(err)
fmt.Printf("Waiting for round %d to start benchmark..\n", status.LastRound+1)
status, err = c.WaitForRound(status.LastRound + 1)
- require.NoError(b, err)
+ a.NoError(err)
b.StartTimer()
@@ -232,7 +234,7 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
_, err = fixture.WaitForConfirmedTxn(status.LastRound+100, addr, tx.ID().String())
fmt.Printf("Waiting for confirmation transaction to commit..\n")
- require.NoError(b, err)
+ a.NoError(err)
}
})
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 77d39199f..a53e808b8 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -106,10 +106,11 @@ func isLetterOrSpace(s string) bool {
}
func getMaxBalAddr(t *testing.T, testClient libgoal.Client, addresses []string) (someBal uint64, someAddress string) {
+ a := require.New(fixtures.SynchronizedTest(t))
someBal = 0
for _, addr := range addresses {
bal, err := testClient.GetBalance(addr)
- require.NoError(t, err)
+ a.NoError(err)
if bal > someBal {
someAddress = addr
someBal = bal
@@ -119,6 +120,7 @@ func getMaxBalAddr(t *testing.T, testClient libgoal.Client, addresses []string)
}
func getDestAddr(t *testing.T, testClient libgoal.Client, addresses []string, someAddress string, wh []byte) (toAddress string) {
+ a := require.New(fixtures.SynchronizedTest(t))
if len(addresses) > 1 {
for _, addr := range addresses {
if addr != someAddress {
@@ -129,11 +131,12 @@ func getDestAddr(t *testing.T, testClient libgoal.Client, addresses []string, so
}
var err error
toAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
return
}
func waitForRoundOne(t *testing.T, testClient libgoal.Client) {
+ a := require.New(fixtures.SynchronizedTest(t))
errchan := make(chan error)
quit := make(chan struct{})
go func() {
@@ -145,7 +148,7 @@ func waitForRoundOne(t *testing.T, testClient libgoal.Client) {
}()
select {
case err := <-errchan:
- require.NoError(t, err)
+ a.NoError(err)
case <-time.After(1 * time.Minute): // Wait 1 minute (same as WaitForRound)
close(quit)
t.Fatalf("%s: timeout waiting for round 1", t.Name())
@@ -155,8 +158,9 @@ func waitForRoundOne(t *testing.T, testClient libgoal.Client) {
var errWaitForTransactionTimeout = errors.New("wait for transaction timed out")
func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, txID string, timeout time.Duration) (tx v1.Transaction, err error) {
+ a := require.New(fixtures.SynchronizedTest(t))
rnd, err := testClient.Status()
- require.NoError(t, err)
+ a.NoError(err)
if rnd.LastRound == 0 {
t.Fatal("it is currently round 0 but we need to wait for a transaction that might happen this round but we'll never know if that happens because ConfirmedRound==0 is indestinguishable from not having happened")
}
@@ -167,8 +171,8 @@ func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, tx
tx, err = testClient.PendingTransactionInformation(txID)
}
if err == nil {
- require.NotEmpty(t, tx)
- require.Empty(t, tx.PoolError)
+ a.NotEmpty(tx)
+ a.Empty(tx.PoolError)
if tx.ConfirmedRound > 0 {
return
}
@@ -182,31 +186,34 @@ func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, tx
}
func TestClientCanGetStatus(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
statusResponse, err := testClient.Status()
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse)
+ a.NoError(err)
+ a.NotEmpty(statusResponse)
testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
statusResponse2, err := testClient.Status()
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse2)
- require.True(t, statusResponse2.LastRound >= statusResponse.LastRound)
+ a.NoError(err)
+ a.NotEmpty(statusResponse2)
+ a.True(statusResponse2.LastRound >= statusResponse.LastRound)
}
func TestClientCanGetStatusAfterBlock(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
statusResponse, err := testClient.WaitForRound(1)
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse)
+ a.NoError(err)
+ a.NotEmpty(statusResponse)
testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
statusResponse, err = testClient.WaitForRound(statusResponse.LastRound + 1)
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse)
+ a.NoError(err)
+ a.NotEmpty(statusResponse)
}
func TestTransactionsByAddr(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
var localFixture fixtures.RestClientFixture
localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer localFixture.Shutdown()
@@ -214,185 +221,196 @@ func TestTransactionsByAddr(t *testing.T) {
testClient := localFixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
_, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
t.Error("no addr with funds")
}
toAddress := getDestAddr(t, testClient, addresses, someAddress, wh)
tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0)
- require.NoError(t, err)
+ a.NoError(err)
txID := tx.ID()
rnd, err := testClient.Status()
- require.NoError(t, err)
+ a.NoError(err)
t.Logf("rnd[%d] created txn %s", rnd.LastRound, txID)
_, err = waitForTransaction(t, testClient, someAddress, txID.String(), 15*time.Second)
- require.NoError(t, err)
+ a.NoError(err)
// what is my round?
rnd, err = testClient.Status()
- require.NoError(t, err)
+ a.NoError(err)
t.Logf("rnd %d", rnd.LastRound)
// Now let's get the transaction
restClient, err := localFixture.NC.AlgodClient()
- require.NoError(t, err)
+ a.NoError(err)
res, err := restClient.TransactionsByAddr(toAddress, 0, rnd.LastRound, 100)
- require.NoError(t, err)
- require.Equal(t, 1, len(res.Transactions))
+ a.NoError(err)
+ a.Equal(1, len(res.Transactions))
for _, tx := range res.Transactions {
- require.Equal(t, tx.From, someAddress)
- require.Equal(t, tx.Payment.Amount, uint64(100000))
- require.Equal(t, tx.Fee, uint64(10000))
+ a.Equal(tx.From, someAddress)
+ a.Equal(tx.Payment.Amount, uint64(100000))
+ a.Equal(tx.Fee, uint64(10000))
}
}
func TestClientCanGetVersion(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
versionResponse, err := testClient.AlgodVersions()
- require.NoError(t, err)
- require.NotEmpty(t, versionResponse)
+ a.NoError(err)
+ a.NotEmpty(versionResponse)
}
func TestClientCanGetSuggestedFee(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
suggestedFeeResponse, err := testClient.SuggestedFee()
- require.NoError(t, err)
+ a.NoError(err)
_ = suggestedFeeResponse // per-byte-fee is allowed to be zero
}
func TestClientCanGetMinTxnFee(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
suggestedParamsRes, err := testClient.SuggestedParams()
- require.NoError(t, err)
- require.Truef(t, suggestedParamsRes.MinTxnFee > 0, "min txn fee not supplied")
+ a.NoError(err)
+ a.Truef(suggestedParamsRes.MinTxnFee > 0, "min txn fee not supplied")
}
func TestClientCanGetBlockInfo(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
blockResponse, err := testClient.Block(1)
- require.NoError(t, err)
- require.NotEmpty(t, blockResponse)
+ a.NoError(err)
+ a.NotEmpty(blockResponse)
}
func TestClientRejectsBadFromAddressWhenSending(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
badAccountAddress := "This is absolutely not a valid account address."
goodAccountAddress := addresses[0]
_, err = testClient.SendPaymentFromWallet(wh, nil, badAccountAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientRejectsBadToAddressWhenSending(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
badAccountAddress := "This is absolutely not a valid account address."
goodAccountAddress := addresses[0]
_, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, badAccountAddress, 10000, 100000, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientRejectsMutatedFromAddressWhenSending(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
goodAccountAddress := addresses[0]
var unmutatedAccountAddress string
if len(addresses) > 1 {
unmutatedAccountAddress = addresses[1]
} else {
unmutatedAccountAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
mutatedAccountAddress := mutateStringAtIndex(unmutatedAccountAddress, 0)
_, err = testClient.SendPaymentFromWallet(wh, nil, mutatedAccountAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientRejectsMutatedToAddressWhenSending(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
goodAccountAddress := addresses[0]
var unmutatedAccountAddress string
if len(addresses) > 1 {
unmutatedAccountAddress = addresses[1]
} else {
unmutatedAccountAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
mutatedAccountAddress := mutateStringAtIndex(unmutatedAccountAddress, 0)
_, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, mutatedAccountAddress, 10000, 100000, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
goodAccountAddress := addresses[0]
nodeDoesNotHaveKeyForThisAddress := "NJY27OQ2ZXK6OWBN44LE4K43TA2AV3DPILPYTHAJAMKIVZDWTEJKZJKO4A"
_, err = testClient.SendPaymentFromWallet(wh, nil, nodeDoesNotHaveKeyForThisAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientOversizedNote(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
fromAddress := addresses[0]
var toAddress string
if len(addresses) > 1 {
toAddress = addresses[1]
} else {
toAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
maxTxnNoteBytes := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnNoteBytes
note := make([]byte, maxTxnNoteBytes+1)
_, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, 100000, note, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientCanSendAndGetNote(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
_, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
t.Error("no addr with funds")
@@ -401,20 +419,21 @@ func TestClientCanSendAndGetNote(t *testing.T) {
maxTxnNoteBytes := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnNoteBytes
note := make([]byte, maxTxnNoteBytes)
tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, note, "", 0, 0)
- require.NoError(t, err)
+ a.NoError(err)
txStatus, err := waitForTransaction(t, testClient, someAddress, tx.ID().String(), 15*time.Second)
- require.NoError(t, err)
- require.Equal(t, note, txStatus.Note)
+ a.NoError(err)
+ a.Equal(note, txStatus.Note)
}
func TestClientCanGetTransactionStatus(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
_, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
t.Error("no addr with funds")
@@ -422,55 +441,57 @@ func TestClientCanGetTransactionStatus(t *testing.T) {
toAddress := getDestAddr(t, testClient, addresses, someAddress, wh)
tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0)
t.Log(string(protocol.EncodeJSON(tx)))
- require.NoError(t, err)
+ a.NoError(err)
t.Log(tx.ID().String())
_, err = waitForTransaction(t, testClient, someAddress, tx.ID().String(), 15*time.Second)
- require.NoError(t, err)
+ a.NoError(err)
}
func TestAccountBalance(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
_, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
t.Error("no addr with funds")
}
toAddress, err := testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0)
- require.NoError(t, err)
+ a.NoError(err)
_, err = waitForTransaction(t, testClient, someAddress, tx.ID().String(), 15*time.Second)
- require.NoError(t, err)
+ a.NoError(err)
account, err := testClient.AccountInformation(toAddress)
- require.NoError(t, err)
- require.Equal(t, account.AmountWithoutPendingRewards, uint64(100000))
- require.Truef(t, account.Amount >= 100000, "account must have received money, and account information endpoint must print it")
+ a.NoError(err)
+ a.Equal(account.AmountWithoutPendingRewards, uint64(100000))
+ a.Truef(account.Amount >= 100000, "account must have received money, and account information endpoint must print it")
}
func TestAccountParticipationInfo(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
_, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
t.Error("no addr with funds")
}
- require.NoError(t, err)
+ a.NoError(err)
addr, err := basics.UnmarshalChecksumAddress(someAddress)
params, err := testClient.SuggestedParams()
- require.NoError(t, err)
+ a.NoError(err)
firstRound := basics.Round(params.LastRound + 1)
lastRound := basics.Round(params.LastRound + 1000)
@@ -501,90 +522,94 @@ func TestAccountParticipationInfo(t *testing.T) {
},
}
txID, err := testClient.SignAndBroadcastTransaction(wh, nil, tx)
- require.NoError(t, err)
+ a.NoError(err)
_, err = waitForTransaction(t, testClient, someAddress, txID, 15*time.Second)
- require.NoError(t, err)
+ a.NoError(err)
account, err := testClient.AccountInformation(someAddress)
- require.NoError(t, err)
- require.Equal(t, randomVotePKStr, string(account.Participation.ParticipationPK), "API must print correct root voting key")
- require.Equal(t, randomSelPKStr, string(account.Participation.VRFPK), "API must print correct vrf key")
- require.Equal(t, uint64(firstRound), account.Participation.VoteFirst, "API must print correct first participation round")
- require.Equal(t, uint64(lastRound), account.Participation.VoteLast, "API must print correct last participation round")
- require.Equal(t, dilution, account.Participation.VoteKeyDilution, "API must print correct key dilution")
+ a.NoError(err)
+ a.Equal(randomVotePKStr, string(account.Participation.ParticipationPK), "API must print correct root voting key")
+ a.Equal(randomSelPKStr, string(account.Participation.VRFPK), "API must print correct vrf key")
+ a.Equal(uint64(firstRound), account.Participation.VoteFirst, "API must print correct first participation round")
+ a.Equal(uint64(lastRound), account.Participation.VoteLast, "API must print correct last participation round")
+ a.Equal(dilution, account.Participation.VoteKeyDilution, "API must print correct key dilution")
}
func TestSupply(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
supply, err := testClient.LedgerSupply()
- require.NoError(t, err)
- require.True(t, supply.TotalMoney > 1e6)
- require.True(t, supply.OnlineMoney > 1e6)
- require.True(t, supply.TotalMoney >= supply.OnlineMoney)
+ a.NoError(err)
+ a.True(supply.TotalMoney > 1e6)
+ a.True(supply.OnlineMoney > 1e6)
+ a.True(supply.TotalMoney >= supply.OnlineMoney)
}
func TestClientCanGetGoRoutines(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.AlgodClient
ctx, ctxCancel := context.WithCancel(context.Background())
defer ctxCancel()
goRoutines, err := testClient.GetGoRoutines(ctx)
- require.NoError(t, err)
- require.NotEmpty(t, goRoutines)
- require.True(t, strings.Index(goRoutines, "goroutine profile:") >= 0)
+ a.NoError(err)
+ a.NotEmpty(goRoutines)
+ a.True(strings.Index(goRoutines, "goroutine profile:") >= 0)
}
func TestSendingTooMuchFails(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
fromAddress := addresses[0]
var toAddress string
if len(addresses) > 1 {
toAddress = addresses[1]
} else {
toAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
fromBalance, err := testClient.GetBalance(fromAddress)
- require.NoError(t, err)
+ a.NoError(err)
// too much amount
_, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, fromBalance+100, nil, "", 0, 0)
t.Log(err)
- require.Error(t, err)
+ a.Error(err)
// waaaay too much amount
_, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, math.MaxUint64, nil, "", 0, 0)
t.Log(err)
- require.Error(t, err)
+ a.Error(err)
// too much fee
_, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, fromBalance+100, 10000, nil, "", 0, 0)
t.Log(err)
- require.Error(t, err)
+ a.Error(err)
// waaaay too much fee
_, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, math.MaxUint64, 10000, nil, "", 0, 0)
t.Log(err)
- require.Error(t, err)
+ a.Error(err)
}
func TestSendingFromEmptyAccountFails(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
var fromAddress string
for _, addr := range addresses {
bal, err := testClient.GetBalance(addr)
- require.NoError(t, err)
+ a.NoError(err)
if bal == 0 {
fromAddress = addr
break
@@ -592,7 +617,7 @@ func TestSendingFromEmptyAccountFails(t *testing.T) {
}
if fromAddress == "" {
fromAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
var toAddress string
for _, addr := range addresses {
@@ -603,24 +628,25 @@ func TestSendingFromEmptyAccountFails(t *testing.T) {
}
if toAddress == "" {
toAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
_, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, 100000, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestSendingTooLittleToEmptyAccountFails(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
waitForRoundOne(t, testClient)
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
var emptyAddress string
for _, addr := range addresses {
bal, err := testClient.GetBalance(addr)
- require.NoError(t, err)
+ a.NoError(err)
if bal == 0 {
emptyAddress = addr
break
@@ -628,23 +654,24 @@ func TestSendingTooLittleToEmptyAccountFails(t *testing.T) {
}
if emptyAddress == "" {
emptyAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
_, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
t.Error("no addr with funds")
}
_, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, 10000, 1, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestSendingLowFeeFails(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
defer fixture.SetTestContext(t)()
testClient := fixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
const sendAmount = 100000
someBal, someAddress := getMaxBalAddr(t, testClient, addresses)
if someAddress == "" {
@@ -655,35 +682,36 @@ func TestSendingLowFeeFails(t *testing.T) {
}
toAddress := getDestAddr(t, testClient, addresses, someAddress, wh)
utx, err := testClient.ConstructPayment(someAddress, toAddress, 1, sendAmount, nil, "", [32]byte{}, 0, 0)
- require.NoError(t, err)
+ a.NoError(err)
utx.Fee.Raw = 1
stx, err := testClient.SignTransactionWithWallet(wh, nil, utx)
- require.NoError(t, err)
+ a.NoError(err)
_, err = testClient.BroadcastTransaction(stx)
t.Log(err)
- require.Error(t, err)
+ a.Error(err)
utx.Fee.Raw = 0
stx, err = testClient.SignTransactionWithWallet(wh, nil, utx)
- require.NoError(t, err)
+ a.NoError(err)
_, err = testClient.BroadcastTransaction(stx)
t.Log(err)
- require.Error(t, err)
+ a.Error(err)
}
func TestSendingNotClosingAccountFails(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
// use a local fixture because we might really mess with the balances
var localFixture fixtures.RestClientFixture
localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer localFixture.Shutdown()
testClient := localFixture.LibGoalClient
wh, err := testClient.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
- require.NoError(t, err)
+ a.NoError(err)
var emptyAddress string
for _, addr := range addresses {
bal, err := testClient.GetBalance(addr)
- require.NoError(t, err)
+ a.NoError(err)
if bal == 0 {
emptyAddress = addr
break
@@ -691,14 +719,14 @@ func TestSendingNotClosingAccountFails(t *testing.T) {
}
if emptyAddress == "" {
emptyAddress, err = testClient.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
}
var someAddress string
someBal := uint64(0)
for _, addr := range addresses {
if addr != emptyAddress {
bal, err := testClient.GetBalance(addr)
- require.NoError(t, err)
+ a.NoError(err)
if bal > someBal {
someAddress = addr
someBal = bal
@@ -710,10 +738,11 @@ func TestSendingNotClosingAccountFails(t *testing.T) {
}
amt := someBal - 10000 - 1
_, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, 10000, amt, nil, "", 0, 0)
- require.Error(t, err)
+ a.Error(err)
}
func TestClientCanGetPendingTransactions(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
var localFixture fixtures.RestClientFixture
localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer localFixture.Shutdown()
@@ -726,24 +755,25 @@ func TestClientCanGetPendingTransactions(t *testing.T) {
// We may not need to kill the other node, but do it anyways to ensure the txn never gets committed
nc, _ := localFixture.GetNodeController("Node")
err := nc.FullStop()
- require.NoError(t, err)
+ a.NoError(err)
minTxnFee, minAcctBalance, err := localFixture.CurrentMinFeeAndBalance()
- require.NoError(t, err)
+ a.NoError(err)
// Check that a single pending txn is corectly displayed
tx, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee, minAcctBalance, nil)
- require.NoError(t, err)
+ a.NoError(err)
statusResponse, err := testClient.GetPendingTransactions(0)
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse)
- require.True(t, statusResponse.TotalTxns == 1)
- require.True(t, len(statusResponse.TruncatedTxns.Transactions) == 1)
- require.True(t, statusResponse.TruncatedTxns.Transactions[0].TxID == tx.ID().String())
+ a.NoError(err)
+ a.NotEmpty(statusResponse)
+ a.True(statusResponse.TotalTxns == 1)
+ a.True(len(statusResponse.TruncatedTxns.Transactions) == 1)
+ a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == tx.ID().String())
}
func TestClientTruncatesPendingTransactions(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
var localFixture fixtures.RestClientFixture
localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer localFixture.Shutdown()
@@ -752,10 +782,10 @@ func TestClientTruncatesPendingTransactions(t *testing.T) {
wh, _ := testClient.GetUnencryptedWalletHandle()
nc, _ := localFixture.GetNodeController("Node")
err := nc.FullStop()
- require.NoError(t, err)
+ a.NoError(err)
minTxnFee, minAcctBalance, err := localFixture.CurrentMinFeeAndBalance()
- require.NoError(t, err)
+ a.NoError(err)
NumTxns := 10
MaxTxns := 7
@@ -765,25 +795,25 @@ func TestClientTruncatesPendingTransactions(t *testing.T) {
for i := 0; i < NumTxns; i++ {
toAddress, _ := testClient.GenerateAddress(wh)
tx2, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee, minAcctBalance, nil)
- require.NoError(t, err)
+ a.NoError(err)
txIDsSeen[tx2.ID().String()] = true
}
statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns))
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse)
- require.True(t, int(statusResponse.TotalTxns) == NumTxns)
- require.True(t, len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
+ a.NoError(err)
+ a.NotEmpty(statusResponse)
+ a.True(int(statusResponse.TotalTxns) == NumTxns)
+ a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
for _, tx := range statusResponse.TruncatedTxns.Transactions {
- require.True(t, txIDsSeen[tx.TxID])
+ a.True(txIDsSeen[tx.TxID])
delete(txIDsSeen, tx.TxID)
}
- require.True(t, len(txIDsSeen) == NumTxns-MaxTxns)
+ a.True(len(txIDsSeen) == NumTxns-MaxTxns)
}
func TestClientPrioritizesPendingTransactions(t *testing.T) {
t.Skip("new FIFO pool does not have prioritization")
-
+ a := require.New(fixtures.SynchronizedTest(t))
var localFixture fixtures.RestClientFixture
localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer localFixture.Shutdown()
@@ -795,28 +825,28 @@ func TestClientPrioritizesPendingTransactions(t *testing.T) {
toAddress, _ := testClient.GenerateAddress(wh)
nc, _ := localFixture.GetNodeController("Node")
err := nc.FullStop()
- require.NoError(t, err)
+ a.NoError(err)
minTxnFee, minAcctBalance, err := localFixture.CurrentMinFeeAndBalance()
- require.NoError(t, err)
+ a.NoError(err)
NumTxns := 5
MaxTxns := 3
for i := 0; i < NumTxns; i++ {
toAddress2, _ := testClient.GenerateAddress(wh)
_, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress2, minTxnFee, minAcctBalance, nil)
- require.NoError(t, err)
+ a.NoError(err)
}
// Add a very high fee transaction. This should have first priority
// (even if we don't know the encoding length of the underlying signed txn)
txHigh, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee*10, minAcctBalance, nil)
- require.NoError(t, err)
+ a.NoError(err)
statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns))
- require.NoError(t, err)
- require.NotEmpty(t, statusResponse)
- require.True(t, int(statusResponse.TotalTxns) == NumTxns+1)
- require.True(t, len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
- require.True(t, statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String())
+ a.NoError(err)
+ a.NotEmpty(statusResponse)
+ a.True(int(statusResponse.TotalTxns) == NumTxns+1)
+ a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
+ a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String())
}
diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
index 461e31e7e..bb09714f9 100644
--- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
+++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
@@ -49,7 +49,7 @@ func cascadeCreateAndFundAccounts(amountToSend, transactionFee uint64, fundingAc
// sends them all money, and sends them online
func TestManyAccountsCanGoOnline(t *testing.T) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go
index 8514c54fd..6c2752831 100644
--- a/test/e2e-go/upgrades/application_support_test.go
+++ b/test/e2e-go/upgrades/application_support_test.go
@@ -41,19 +41,20 @@ const lastProtocolBeforeApplicationSupport = protocol.ConsensusV23
const firstProtocolWithApplicationSupport = protocol.ConsensusV24
func makeApplicationUpgradeConsensus(t *testing.T) (appConsensus config.ConsensusProtocols) {
+ a := require.New(fixtures.SynchronizedTest(t))
appConsensus = generateFastUpgradeConsensus()
// make sure that the "current" version does not support application and that the "future" version *does* support applications.
currentProtocolParams, ok := appConsensus[consensusTestFastUpgrade(lastProtocolBeforeApplicationSupport)]
- require.True(t, ok)
+ a.True(ok)
futureProtocolParams, ok := appConsensus[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)]
- require.True(t, ok)
+ a.True(ok)
// ensure it's disabled.
- require.False(t, currentProtocolParams.Application)
- require.False(t, currentProtocolParams.SupportRekeying)
+ a.False(currentProtocolParams.Application)
+ a.False(currentProtocolParams.SupportRekeying)
// verify that the future protocol supports applications.
- require.True(t, futureProtocolParams.Application)
+ a.True(futureProtocolParams.Application)
// add an upgrade path from current to future.
currentProtocolParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
@@ -77,36 +78,38 @@ func TestApplicationsUpgradeOverREST(t *testing.T) {
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json"))
defer fixture.Shutdown()
+ a := require.New(fixtures.SynchronizedTest(t))
+
client := fixture.GetLibGoalClientForNamedNode("Node")
accountList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir())
- require.NoError(t, err)
+ a.NoError(err)
creator := accountList[0].Address
wh, err := client.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
user, err := client.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
fee := uint64(1000)
round, err := client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
// Fund the manager, so it can issue transactions later on
_, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
- require.NoError(t, err)
+ a.NoError(err)
client.WaitForRound(round + 2)
// There should be no apps to start with
ad, err := client.AccountData(creator)
- require.NoError(t, err)
- require.Zero(t, len(ad.AppParams))
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
ad, err = client.AccountData(user)
- require.NoError(t, err)
- require.Zero(t, len(ad.AppParams))
- require.Equal(t, basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
+ a.Equal(basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
counter := `#pragma version 2
// a simple global and local calls counter app
@@ -133,9 +136,9 @@ app_local_put
int 1
`
approvalOps, err := logic.AssembleString(counter)
- require.NoError(t, err)
+ a.NoError(err)
clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
- require.NoError(t, err)
+ a.NoError(err)
schema := basics.StateSchema{
NumUint: 1,
}
@@ -144,20 +147,20 @@ int 1
tx, err := client.MakeUnsignedAppCreateTx(
transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil,
)
- require.NoError(t, err)
+ a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
- require.NoError(t, err)
+ a.NoError(err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
- require.NoError(t, err)
+ a.NoError(err)
round, err = client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
_, err = client.BroadcastTransaction(signedTxn)
- require.Error(t, err)
- require.Contains(t, err.Error(), "application transaction not supported")
+ a.Error(err)
+ a.Contains(err.Error(), "application transaction not supported")
curStatus, err := client.Status()
- require.NoError(t, err)
+ a.NoError(err)
initialStatus := curStatus
startLoopTime := time.Now()
@@ -165,31 +168,31 @@ int 1
// wait until the network upgrade : this can take a while.
for curStatus.LastVersion == initialStatus.LastVersion {
curStatus, err = client.Status()
- require.NoError(t, err)
+ a.NoError(err)
- require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
+ a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
// now, that we have upgraded to the new protocol which supports applications, try again.
_, err = client.BroadcastTransaction(signedTxn)
- require.NoError(t, err)
+ a.NoError(err)
curStatus, err = client.Status()
- require.NoError(t, err)
+ a.NoError(err)
round = curStatus.LastRound
client.WaitForRound(round + 2)
pendingTx, err := client.GetPendingTransactions(1)
- require.NoError(t, err)
- require.Equal(t, uint64(0), pendingTx.TotalTxns)
+ a.NoError(err)
+ a.Equal(uint64(0), pendingTx.TotalTxns)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
- require.NoError(t, err)
- require.Equal(t, 1, len(ad.AppParams))
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
var appIdx basics.AppIndex
var params basics.AppParams
for i, p := range ad.AppParams {
@@ -197,84 +200,84 @@ int 1
params = p
break
}
- require.Equal(t, approvalOps.Program, params.ApprovalProgram)
- require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
- require.Equal(t, schema, params.LocalStateSchema)
- require.Equal(t, schema, params.GlobalStateSchema)
- require.Equal(t, 1, len(params.GlobalState))
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
value, ok := params.GlobalState["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
- require.Equal(t, 1, len(ad.AppLocalStates))
+ a.Equal(1, len(ad.AppLocalStates))
state, ok := ad.AppLocalStates[appIdx]
- require.True(t, ok)
- require.Equal(t, schema, state.Schema)
- require.Equal(t, 1, len(state.KeyValue))
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
// call the app
tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
- require.NoError(t, err)
+ a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
- require.NoError(t, err)
+ a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
- require.NoError(t, err)
+ a.NoError(err)
round, err = client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
_, err = client.BroadcastTransaction(signedTxn)
- require.NoError(t, err)
+ a.NoError(err)
client.WaitForRound(round + 2)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
- require.NoError(t, err)
- require.Equal(t, 1, len(ad.AppParams))
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
params, ok = ad.AppParams[appIdx]
- require.True(t, ok)
- require.Equal(t, approvalOps.Program, params.ApprovalProgram)
- require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
- require.Equal(t, schema, params.LocalStateSchema)
- require.Equal(t, schema, params.GlobalStateSchema)
- require.Equal(t, 1, len(params.GlobalState))
+ a.True(ok)
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
value, ok = params.GlobalState["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(2), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(2), value.Uint)
- require.Equal(t, 1, len(ad.AppLocalStates))
+ a.Equal(1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
- require.True(t, ok)
- require.Equal(t, schema, state.Schema)
- require.Equal(t, 1, len(state.KeyValue))
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
- require.Equal(t, uint64(2), ad.TotalAppSchema.NumUint)
+ a.Equal(uint64(2), ad.TotalAppSchema.NumUint)
// check user's balance record for the app entry and the state changes
ad, err = client.AccountData(user)
- require.NoError(t, err)
- require.Equal(t, 0, len(ad.AppParams))
+ a.NoError(err)
+ a.Equal(0, len(ad.AppParams))
- require.Equal(t, 1, len(ad.AppLocalStates))
+ a.Equal(1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
- require.True(t, ok)
- require.Equal(t, schema, state.Schema)
- require.Equal(t, 1, len(state.KeyValue))
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
- require.Equal(t, basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
+ a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
app, err := client.ApplicationInformation(uint64(appIdx))
- require.NoError(t, err)
- require.Equal(t, uint64(appIdx), app.Id)
- require.Equal(t, creator, app.Params.Creator)
+ a.NoError(err)
+ a.Equal(uint64(appIdx), app.Id)
+ a.Equal(creator, app.Params.Creator)
return
}
@@ -282,6 +285,7 @@ int 1
// to a version that supports applications. It verify that prior to supporting applications, the node would not accept
// any application transaction and after the upgrade is complete, it would support that.
func TestApplicationsUpgradeOverGossip(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
smallLambdaMs := 500
consensus := makeApplicationUpgradeConsensus(t)
@@ -298,44 +302,44 @@ func TestApplicationsUpgradeOverGossip(t *testing.T) {
client := fixture.GetLibGoalClientForNamedNode("Primary")
secondary := fixture.GetLibGoalClientForNamedNode("Node")
err := config.SaveConfigurableConsensus(client.DataDir(), consensus)
- require.NoError(t, err)
+ a.NoError(err)
fixture.Start()
defer fixture.Shutdown()
accountList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir())
- require.NoError(t, err)
+ a.NoError(err)
creator := accountList[0].Address
wh, err := client.GetUnencryptedWalletHandle()
- require.NoError(t, err)
+ a.NoError(err)
user, err := client.GenerateAddress(wh)
- require.NoError(t, err)
+ a.NoError(err)
fee := uint64(1000)
round, err := client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
// Fund the manager, so it can issue transactions later on
_, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
- require.NoError(t, err)
+ a.NoError(err)
client.WaitForRound(round + 2)
round, err = client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
// There should be no apps to start with
ad, err := client.AccountData(creator)
- require.NoError(t, err)
- require.Zero(t, len(ad.AppParams))
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
ad, err = client.AccountData(user)
- require.NoError(t, err)
- require.Zero(t, len(ad.AppParams))
- require.Equal(t, basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
+ a.Equal(basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
counter := `#pragma version 2
// a simple global and local calls counter app
@@ -362,9 +366,9 @@ app_local_put
int 1
`
approvalOps, err := logic.AssembleString(counter)
- require.NoError(t, err)
+ a.NoError(err)
clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
- require.NoError(t, err)
+ a.NoError(err)
schema := basics.StateSchema{
NumUint: 1,
}
@@ -373,39 +377,39 @@ int 1
tx, err := client.MakeUnsignedAppCreateTx(
transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil,
)
- require.NoError(t, err)
+ a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, round, round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds, fee, tx)
- require.NoError(t, err)
+ a.NoError(err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
- require.NoError(t, err)
+ a.NoError(err)
round, err = client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
_, err = client.BroadcastTransaction(signedTxn)
- require.NoError(t, err)
+ a.NoError(err)
// this transaction is expect to reach the first node ( primary ), but to be rejected by the second node when transmitted over gossip.
client.WaitForRound(round + 2)
// check that the primary node still has this transaction in it's transaction pool.
pendingTx, err := client.GetPendingTransactions(1)
- require.NoError(t, err)
+ a.NoError(err)
round, err = client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
if round > round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds {
t.Skip("Test platform is too slow for this test")
}
- require.Equal(t, uint64(1), pendingTx.TotalTxns)
+ a.Equal(uint64(1), pendingTx.TotalTxns)
// check that the secondary node doesn't have that transaction in it's transaction pool.
pendingTx, err = secondary.GetPendingTransactions(1)
- require.NoError(t, err)
- require.Equal(t, uint64(0), pendingTx.TotalTxns)
+ a.NoError(err)
+ a.Equal(uint64(0), pendingTx.TotalTxns)
curStatus, err := client.Status()
- require.NoError(t, err)
+ a.NoError(err)
initialStatus := curStatus
startLoopTime := time.Now()
@@ -413,35 +417,35 @@ int 1
// wait until the network upgrade : this can take a while.
for curStatus.LastVersion == initialStatus.LastVersion {
curStatus, err = client.Status()
- require.NoError(t, err)
+ a.NoError(err)
- require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
+ a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
// now, that we have upgraded to the new protocol which supports applications, try again.
tx, err = client.FillUnsignedTxTemplate(creator, round, round+100, fee, tx)
- require.NoError(t, err)
+ a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
- require.NoError(t, err)
+ a.NoError(err)
_, err = client.BroadcastTransaction(signedTxn)
- require.NoError(t, err)
+ a.NoError(err)
curStatus, err = client.Status()
- require.NoError(t, err)
+ a.NoError(err)
round = curStatus.LastRound
client.WaitForRound(round + 2)
pendingTx, err = client.GetPendingTransactions(1)
- require.NoError(t, err)
- require.Equal(t, uint64(0), pendingTx.TotalTxns)
+ a.NoError(err)
+ a.Equal(uint64(0), pendingTx.TotalTxns)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
- require.NoError(t, err)
- require.Equal(t, 1, len(ad.AppParams))
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
var appIdx basics.AppIndex
var params basics.AppParams
for i, p := range ad.AppParams {
@@ -449,83 +453,83 @@ int 1
params = p
break
}
- require.Equal(t, approvalOps.Program, params.ApprovalProgram)
- require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
- require.Equal(t, schema, params.LocalStateSchema)
- require.Equal(t, schema, params.GlobalStateSchema)
- require.Equal(t, 1, len(params.GlobalState))
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
value, ok := params.GlobalState["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
- require.Equal(t, 1, len(ad.AppLocalStates))
+ a.Equal(1, len(ad.AppLocalStates))
state, ok := ad.AppLocalStates[appIdx]
- require.True(t, ok)
- require.Equal(t, schema, state.Schema)
- require.Equal(t, 1, len(state.KeyValue))
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
// call the app
tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
- require.NoError(t, err)
+ a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
- require.NoError(t, err)
+ a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
- require.NoError(t, err)
+ a.NoError(err)
round, err = client.CurrentRound()
- require.NoError(t, err)
+ a.NoError(err)
_, err = client.BroadcastTransaction(signedTxn)
- require.NoError(t, err)
+ a.NoError(err)
client.WaitForRound(round + 2)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
- require.NoError(t, err)
- require.Equal(t, 1, len(ad.AppParams))
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
params, ok = ad.AppParams[appIdx]
- require.True(t, ok)
- require.Equal(t, approvalOps.Program, params.ApprovalProgram)
- require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
- require.Equal(t, schema, params.LocalStateSchema)
- require.Equal(t, schema, params.GlobalStateSchema)
- require.Equal(t, 1, len(params.GlobalState))
+ a.True(ok)
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
value, ok = params.GlobalState["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(2), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(2), value.Uint)
- require.Equal(t, 1, len(ad.AppLocalStates))
+ a.Equal(1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
- require.True(t, ok)
- require.Equal(t, schema, state.Schema)
- require.Equal(t, 1, len(state.KeyValue))
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
- require.Equal(t, uint64(2), ad.TotalAppSchema.NumUint)
+ a.Equal(uint64(2), ad.TotalAppSchema.NumUint)
// check user's balance record for the app entry and the state changes
ad, err = client.AccountData(user)
- require.NoError(t, err)
- require.Equal(t, 0, len(ad.AppParams))
+ a.NoError(err)
+ a.Equal(0, len(ad.AppParams))
- require.Equal(t, 1, len(ad.AppLocalStates))
+ a.Equal(1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
- require.True(t, ok)
- require.Equal(t, schema, state.Schema)
- require.Equal(t, 1, len(state.KeyValue))
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
- require.True(t, ok)
- require.Equal(t, uint64(1), value.Uint)
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
- require.Equal(t, basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
+ a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
app, err := client.ApplicationInformation(uint64(appIdx))
- require.NoError(t, err)
- require.Equal(t, uint64(appIdx), app.Id)
- require.Equal(t, creator, app.Params.Creator)
+ a.NoError(err)
+ a.Equal(uint64(appIdx), app.Id)
+ a.Equal(creator, app.Params.Creator)
return
}
diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go
index 2cb85dfa4..e4e56c5bb 100644
--- a/test/e2e-go/upgrades/rekey_support_test.go
+++ b/test/e2e-go/upgrades/rekey_support_test.go
@@ -29,7 +29,7 @@ import (
// TestRekeyUpgrade tests that we rekey does not work before the upgrade and works well after
func TestRekeyUpgrade(t *testing.T) {
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
smallLambdaMs := 500
consensus := makeApplicationUpgradeConsensus(t)
@@ -73,7 +73,7 @@ func TestRekeyUpgrade(t *testing.T) {
a.NoError(err)
_, err = client.BroadcastTransaction(rekey)
a.Error(err)
- require.Contains(t, err.Error(), "transaction has RekeyTo set but rekeying not yet enable")
+ a.Contains(err.Error(), "transaction has RekeyTo set but rekeying not yet enable")
// use rekeyed key to authorize (AuthAddr check)
tx.RekeyTo = basics.Address{}
@@ -81,10 +81,10 @@ func TestRekeyUpgrade(t *testing.T) {
a.NoError(err)
_, err = client.BroadcastTransaction(rekeyed)
a.Error(err)
- require.Contains(t, err.Error(), "nonempty AuthAddr but rekeying not supported")
+ a.Contains(err.Error(), "nonempty AuthAddr but rekeying not supported")
// go to upgrade
curStatus, err := client.Status()
- require.NoError(t, err)
+ a.NoError(err)
initialStatus := curStatus
startLoopTime := time.Now()
@@ -92,21 +92,21 @@ func TestRekeyUpgrade(t *testing.T) {
// wait until the network upgrade : this can take a while.
for curStatus.LastVersion == initialStatus.LastVersion {
curStatus, err = client.Status()
- require.NoError(t, err)
+ a.NoError(err)
- require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
+ a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
// now, that we have upgraded to the new protocol which supports rekey, try again.
_, err = client.BroadcastTransaction(rekey)
- require.NoError(t, err)
+ a.NoError(err)
round, err = client.CurrentRound()
a.NoError(err)
client.WaitForRound(round + 1)
_, err = client.BroadcastTransaction(rekeyed)
- require.NoError(t, err)
+ a.NoError(err)
}
diff --git a/test/e2e-go/upgrades/send_receive_upgrade_test.go b/test/e2e-go/upgrades/send_receive_upgrade_test.go
index 548d1ff45..6ad81e8ff 100644
--- a/test/e2e-go/upgrades/send_receive_upgrade_test.go
+++ b/test/e2e-go/upgrades/send_receive_upgrade_test.go
@@ -98,7 +98,7 @@ func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtoc
func testAccountsCanSendMoneyAcrossUpgrade(t *testing.T, templatePath string) {
t.Parallel()
- a := require.New(t)
+ a := require.New(fixtures.SynchronizedTest(t))
consensus := generateFastUpgradeConsensus()
diff --git a/test/framework/fixtures/auctionFixture.go b/test/framework/fixtures/auctionFixture.go
index e5ef6b5d6..2a88a6e1b 100644
--- a/test/framework/fixtures/auctionFixture.go
+++ b/test/framework/fixtures/auctionFixture.go
@@ -30,7 +30,6 @@ import (
"strings"
"sync"
"syscall"
- "testing"
"time"
"github.com/stretchr/testify/assert"
@@ -164,9 +163,9 @@ func (f *AuctionFixture) GetAuctionConsoleRestClient() auctionClient.ConsoleRest
}
// Setup is called to initialize the test fixture for the test(s), uses default ports for auction bank and console
-func (f *AuctionFixture) Setup(t *testing.T, templateFile string) (err error) {
+func (f *AuctionFixture) Setup(t TestingTB, templateFile string) (err error) {
- f.t = t
+ f.t = SynchronizedTest(t)
f.bidderSecretKeyCache = make(map[string]crypto.PrivateKey)
diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go
index 415094409..5d34b0c2c 100644
--- a/test/framework/fixtures/expectFixture.go
+++ b/test/framework/fixtures/expectFixture.go
@@ -24,6 +24,7 @@ import (
"path"
"path/filepath"
"regexp"
+ "runtime"
"strings"
"testing"
@@ -55,7 +56,9 @@ func (ef *ExpectFixture) initialize(t *testing.T) (err error) {
}
ef.testDataDir = os.Getenv("TESTDATADIR")
if ef.testDataDir == "" {
- ef.testDataDir = os.ExpandEnv("${GOPATH}/src/github.com/algorand/go-algorand/test/testdata")
+ // Default to test/testdata in the source tree being tested
+ _, path, _, _ := runtime.Caller(0)
+ ef.testDataDir = filepath.Join(filepath.Dir(path), "../../testdata")
}
ef.testFilter = os.Getenv("TESTFILTER")
@@ -101,9 +104,9 @@ func MakeExpectTest(t *testing.T) *ExpectFixture {
}
return nil
})
- require.NoError(t, err)
+ require.NoError(SynchronizedTest(t), err)
err = ef.initialize(t)
- require.NoError(t, err)
+ require.NoError(SynchronizedTest(t), err)
return ef
}
@@ -112,9 +115,10 @@ func (ef *ExpectFixture) Run() {
for testName := range ef.expectFiles {
if match, _ := regexp.MatchString(ef.testFilter, testName); match {
ef.t.Run(testName, func(t *testing.T) {
+ syncTest := SynchronizedTest(t)
workingDir, algoDir, err := ef.getTestDir(testName)
- require.NoError(t, err)
- t.Logf("algoDir: %s\ntestDataDir:%s\n", algoDir, ef.testDataDir)
+ require.NoError(SynchronizedTest(t), err)
+ syncTest.Logf("algoDir: %s\ntestDataDir:%s\n", algoDir, ef.testDataDir)
cmd := exec.Command("expect", testName, algoDir, ef.testDataDir)
var outBuf bytes.Buffer
cmd.Stdout = &outBuf
@@ -128,8 +132,8 @@ func (ef *ExpectFixture) Run() {
// Using os.File as stderr does not trigger goroutine creation, instead exec.Cmd relies on os.File implementation.
errFile, err := os.OpenFile(path.Join(workingDir, "stderr.txt"), os.O_CREATE|os.O_RDWR, 0)
if err != nil {
- t.Logf("failed opening stderr temp file: %s\n", err.Error())
- t.Fail()
+ syncTest.Logf("failed opening stderr temp file: %s\n", err.Error())
+ syncTest.Fail()
}
defer errFile.Close() // Close might error but we Sync it before leaving the scope
cmd.Stderr = errFile
@@ -151,8 +155,8 @@ func (ef *ExpectFixture) Run() {
if ferr != nil {
stderr = ferr.Error()
}
- t.Logf("err running '%s': %s\nstdout: %s\nstderr: %s\n", testName, err, string(outBuf.Bytes()), stderr)
- t.Fail()
+ syncTest.Logf("err running '%s': %s\nstdout: %s\nstderr: %s\n", testName, err, string(outBuf.Bytes()), stderr)
+ syncTest.Fail()
} else {
// t.Logf("stdout: %s", string(outBuf.Bytes()))
ef.removeTestDir(workingDir)
diff --git a/test/framework/fixtures/fixture.go b/test/framework/fixtures/fixture.go
index 4bb1cf625..2775a31d7 100644
--- a/test/framework/fixtures/fixture.go
+++ b/test/framework/fixtures/fixture.go
@@ -16,18 +16,30 @@
package fixtures
-import "testing"
+import (
+ "testing"
-// TestingT captures the common methods of *testing.T and *testing.B
-// that we use.
-type TestingT interface {
- Fatalf(format string, args ...interface{})
- Errorf(format string, args ...interface{})
+ "github.com/algorand/go-deadlock"
+)
+
+// TestingTB is identical to testing.TB, beside the private method.
+type TestingTB interface {
+ Cleanup(func())
Error(args ...interface{})
- Logf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fail()
FailNow()
Failed() bool
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Helper()
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
Name() string
+ Skip(args ...interface{})
+ SkipNow()
+ Skipf(format string, args ...interface{})
+ Skipped() bool
}
// Fixture provides the base interface for all E2E test fixtures
@@ -50,3 +62,110 @@ type Fixture interface {
// (e.g. shared across all tests in a package)
ShutdownImpl(preserveData bool)
}
+
+var synchTestMu deadlock.Mutex
+var synchTests = make(map[TestingTB]TestingTB)
+
+// SynchronizedTest generates a testing.TB compatible test for a given testing.TB interface.
+// calling SynchronizedTest with the same tb would return the exact same instance of synchTest
+func SynchronizedTest(tb TestingTB) TestingTB {
+ if st, ok := tb.(*synchTest); ok {
+ return st
+ }
+ synchTestMu.Lock()
+ defer synchTestMu.Unlock()
+ if t, have := synchTests[tb]; have {
+ return t
+ }
+ t := &synchTest{
+ t: tb,
+ }
+ synchTests[tb] = t
+ return t
+}
+
+type synchTest struct {
+ deadlock.Mutex
+ t TestingTB
+}
+
+func (st *synchTest) Cleanup(f func()) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Cleanup(f)
+}
+func (st *synchTest) Error(args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Error(args...)
+}
+func (st *synchTest) Errorf(format string, args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Errorf(format, args...)
+}
+func (st *synchTest) Fail() {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Fail()
+}
+func (st *synchTest) FailNow() {
+ st.Lock()
+ defer st.Unlock()
+ st.t.FailNow()
+}
+func (st *synchTest) Failed() bool {
+ st.Lock()
+ defer st.Unlock()
+ return st.t.Failed()
+}
+func (st *synchTest) Fatal(args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Fatal(args...)
+}
+func (st *synchTest) Fatalf(format string, args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Fatalf(format, args...)
+}
+func (st *synchTest) Helper() {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Helper()
+}
+func (st *synchTest) Log(args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Log(args...)
+}
+func (st *synchTest) Logf(format string, args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Logf(format, args...)
+}
+func (st *synchTest) Name() string {
+ st.Lock()
+ defer st.Unlock()
+ return st.t.Name()
+}
+func (st *synchTest) Skip(args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Skip(args...)
+}
+func (st *synchTest) SkipNow() {
+ st.Lock()
+ defer st.Unlock()
+ st.t.SkipNow()
+}
+func (st *synchTest) Skipf(format string, args ...interface{}) {
+ st.Lock()
+ defer st.Unlock()
+ st.t.Skipf(format, args...)
+}
+func (st *synchTest) Skipped() bool {
+ st.Lock()
+ defer st.Unlock()
+ return st.t.Skipped()
+}
diff --git a/test/framework/fixtures/kmdFixture.go b/test/framework/fixtures/kmdFixture.go
index 89cf1bbb7..1226bd090 100644
--- a/test/framework/fixtures/kmdFixture.go
+++ b/test/framework/fixtures/kmdFixture.go
@@ -50,7 +50,7 @@ var defaultAPIToken = []byte(strings.Repeat("a", 64))
// KMDFixture is a test fixture for tests requiring interactions with kmd
type KMDFixture struct {
baseFixture
- t TestingT
+ t TestingTB
initialized bool
dataDir string
kmdDir string
@@ -94,39 +94,39 @@ func (f *KMDFixture) ShutdownImpl(preserveData bool) {
}
// SetupWithWallet starts kmd and creates a wallet, returning a wallet handle
-func (f *KMDFixture) SetupWithWallet(t TestingT) (handleToken string) {
+func (f *KMDFixture) SetupWithWallet(t TestingTB) (handleToken string) {
f.Setup(t)
handleToken, _ = f.MakeWalletAndHandleToken()
return
}
// Setup starts kmd with the default config
-func (f *KMDFixture) Setup(t TestingT) {
+func (f *KMDFixture) Setup(t TestingTB) {
f.SetupWithConfig(t, "")
}
// Initialize initializes the dataDir and TestingT for this test but doesn't start kmd
-func (f *KMDFixture) Initialize(t TestingT) {
+func (f *KMDFixture) Initialize(t TestingTB) {
f.initialize(f)
- f.t = t
+ f.t = SynchronizedTest(t)
f.dataDir = filepath.Join(f.testDir, t.Name())
// Remove any existing tests in this dataDir + recreate
err := os.RemoveAll(f.dataDir)
- require.NoError(t, err)
+ require.NoError(f.t, err)
err = os.Mkdir(f.dataDir, 0750)
- require.NoError(t, err)
+ require.NoError(f.t, err)
// Set up the kmd data dir within the main datadir
f.kmdDir = filepath.Join(f.dataDir, nodecontrol.DefaultKMDDataDir)
err = os.Mkdir(f.kmdDir, nodecontrol.DefaultKMDDataDirPerms)
- require.NoError(t, err)
+ require.NoError(f.t, err)
}
// SetupWithConfig starts a kmd node with the passed config or default test
// config, if the passed config is blank. Though internally an error might
// occur during setup, we never return one, because we'll still fail the test
// for any errors here, and it keeps the test code much cleaner
-func (f *KMDFixture) SetupWithConfig(t TestingT, config string) {
+func (f *KMDFixture) SetupWithConfig(t TestingTB, config string) {
// Setup is called once per test, so it's OK for test to store one particular TestingT
f.Initialize(t)
@@ -134,14 +134,14 @@ func (f *KMDFixture) SetupWithConfig(t TestingT, config string) {
f.APIToken = defaultAPIToken
tokenFilepath := filepath.Join(f.kmdDir, "kmd.token")
err := ioutil.WriteFile(tokenFilepath, f.APIToken, 0640)
- require.NoError(t, err)
+ require.NoError(f.t, err)
if config == "" {
config = defaultConfig
}
configFilepath := filepath.Join(f.kmdDir, "kmd_config.json")
err = ioutil.WriteFile(configFilepath, []byte(config), 0640)
- require.NoError(t, err)
+ require.NoError(f.t, err)
// Start kmd
nc := nodecontrol.MakeNodeController(f.binDir, f.dataDir)
@@ -149,17 +149,17 @@ func (f *KMDFixture) SetupWithConfig(t TestingT, config string) {
_, err = nc.StartKMD(nodecontrol.KMDStartArgs{
TimeoutSecs: defaultTimeoutSecs,
})
- require.NoError(t, err)
+ require.NoError(f.t, err)
// Mark ourselves as initialized so we know to shut down server
f.initialized = true
// Build a client
sock, err := util.GetFirstLineFromFile(filepath.Join(f.kmdDir, "kmd.net"))
- require.NoError(t, err)
+ require.NoError(f.t, err)
f.Sock = sock
client, err := client.MakeKMDClient(f.Sock, string(f.APIToken))
- require.NoError(t, err)
+ require.NoError(f.t, err)
f.Client = &client
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 60bd6fc96..3f390e61c 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -50,7 +50,7 @@ type LibGoalFixture struct {
rootDir string
Name string
network netdeploy.Network
- t TestingT
+ t TestingTB
tMu deadlock.RWMutex
clientPartKeys map[string][]account.Participation
consensus config.ConsensusProtocols
@@ -63,13 +63,13 @@ func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) {
}
// Setup is called to initialize the test fixture for the test(s)
-func (f *LibGoalFixture) Setup(t TestingT, templateFile string) {
+func (f *LibGoalFixture) Setup(t TestingTB, templateFile string) {
f.setup(t, t.Name(), templateFile, true)
}
// SetupNoStart is called to initialize the test fixture for the test(s)
// but does not start the network before returning. Call NC.Start() to start later.
-func (f *LibGoalFixture) SetupNoStart(t TestingT, templateFile string) {
+func (f *LibGoalFixture) SetupNoStart(t TestingTB, templateFile string) {
f.setup(t, t.Name(), templateFile, false)
}
@@ -83,10 +83,10 @@ func (f *LibGoalFixture) Genesis() gen.GenesisData {
return f.network.Genesis()
}
-func (f *LibGoalFixture) setup(test TestingT, testName string, templateFile string, startNetwork bool) {
+func (f *LibGoalFixture) setup(test TestingTB, testName string, templateFile string, startNetwork bool) {
// Call initialize for our base implementation
f.initialize(f)
- f.t = test
+ f.t = SynchronizedTest(test)
f.rootDir = filepath.Join(f.testDir, testName)
// In case we're running tests against the same rootDir, purge it to avoid errors from already-exists
@@ -186,13 +186,17 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) {
if err != nil {
// Couldn't read it, skip it
err = nil
+ handle.Close()
continue
}
// Early reject partkeys if we already have a rootkey for the account
if !accountsWithRootKeys[participation.Address().String()] {
- allPartKeys = append(allPartKeys, participation)
+ allPartKeys = append(allPartKeys, participation.Participation)
}
+
+ // close the database handle.
+ participation.Close()
}
}
@@ -273,10 +277,10 @@ func (f *LibGoalFixture) Start() {
// SetTestContext should be called within each test using a shared fixture.
// It ensures the current test context is set and then reset after the test ends
// It should be called in the form of "defer fixture.SetTestContext(t)()"
-func (f *LibGoalFixture) SetTestContext(t TestingT) func() {
+func (f *LibGoalFixture) SetTestContext(t TestingTB) func() {
f.tMu.Lock()
defer f.tMu.Unlock()
- f.t = t
+ f.t = SynchronizedTest(t)
return func() {
f.tMu.Lock()
defer f.tMu.Unlock()
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index 9f57cd046..a27a1a5b1 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -39,14 +39,14 @@ type RestClientFixture struct {
}
// Setup is called to initialize the test fixture for the test(s)
-func (f *RestClientFixture) Setup(t TestingT, templateFile string) {
+func (f *RestClientFixture) Setup(t TestingTB, templateFile string) {
f.LibGoalFixture.Setup(t, templateFile)
f.AlgodClient = f.GetAlgodClientForController(f.NC)
}
// SetupNoStart is called to initialize the test fixture for the test(s)
// but does not start the network before returning. Call NC.Start() to start later.
-func (f *RestClientFixture) SetupNoStart(t TestingT, templateFile string) {
+func (f *RestClientFixture) SetupNoStart(t TestingTB, templateFile string) {
f.LibGoalFixture.SetupNoStart(t, templateFile)
}
diff --git a/test/heapwatch/README.md b/test/heapwatch/README.md
new file mode 100644
index 000000000..27cb54d31
--- /dev/null
+++ b/test/heapwatch/README.md
@@ -0,0 +1,43 @@
+# Heap Watch
+
+Tools for checking if algod has memory leaks.
+
+Run a local private network of three nodes and two pingpongs.
+
+Periodically sample pprof memory profiles.
+
+Watch memory usage from `ps` and write to a CSV file for each algod.
+
+# Usage
+
+To start:
+
+```sh
+bash test/heapwatch/start.sh /tmp/todaysTest
+```
+
+To stop:
+
+```sh
+bash test/heapwatch/stop.sh /tmp/todaysTest
+```
+
+Results:
+
+Snapshot usage plots and inter-snapshot delta plots.
+
+```sh
+ls /tmp/todaysTest/heaps/*.svg
+```
+
+The raw files for analysis with `go tool pprof`
+
+```sh
+ls /tmp/todaysTest/heaps/*.heap
+```
+
+CSV files of memory usage according to `ps`:
+
+```sh
+ls /tmp/todaysTest/heaps/*.csv
+``` \ No newline at end of file
diff --git a/test/heapwatch/bwstart.sh b/test/heapwatch/bwstart.sh
new file mode 100644
index 000000000..3770136f7
--- /dev/null
+++ b/test/heapwatch/bwstart.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Run a local 3-relay 8-leaf-node test.
+# Run 40 TPS of payment txns through it.
+# Record metrics for bandwidth analysis.
+
+set -e
+set -o pipefail
+set -x
+export SHELLOPTS
+
+TESTROOT=$1
+if [ -z "${TESTROOT}" ]; then
+ TESTROOT=/tmp/heap_testnetwork
+fi
+
+mkdir -p "${TESTROOT}"
+
+netgoal generate --nodes 8 --relays 3 -r "${TESTROOT}" -o "${TESTROOT}"/netgoal.json --template goalnet -w 15
+
+TESTDIR="${TESTROOT}"/net
+
+REPO_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"/../..
+
+goal network create -r "${TESTDIR}" -t "${TESTROOT}"/netgoal.json -n r3n8
+
+goal network start -r "${TESTDIR}"
+
+# give all the algod a moment...
+sleep 2
+
+mkdir -p "${TESTDIR}/heaps"
+python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --no-heap --metrics --blockinfo --period 90 "${TESTDIR}"/{node,relay}* > "${TESTDIR}/heaps/watch.log" 2>&1 &
+
+echo "$!" > .heapWatch.pid
+
+# TODO: other pingpong modes
+pingpong run -d "${TESTDIR}/node1" --tps 20 --rest 0 --run 0 &
+
+echo "$!" > .pingpong1.pid
+
+pingpong run -d "${TESTDIR}/node2" --tps 20 --rest 0 --run 0 &
+
+echo "$!" > .pingpong2.pid
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
new file mode 100644
index 000000000..d41bf8528
--- /dev/null
+++ b/test/heapwatch/heapWatch.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python3
+#
+# repeatedly snapshot heap profiles for one or more algod
+#
+# usage:
+# mkdir -p /tmp/heaps
+# python3 test/heapwatch/heapWatch.py -o /tmp/heaps --period 60s private_network_root/*
+
+import argparse
+import json
+import logging
+import os
+import signal
+import subprocess
+import sys
+import time
+import urllib.request
+
+# pip install py-algorand-sdk
+import algosdk
+import algosdk.v2client
+import algosdk.v2client.algod
+
+logger = logging.getLogger(__name__)
+
+
+
+def read_algod_dir(algorand_data):
+ with open(os.path.join(algorand_data, 'algod.net')) as fin:
+ net = fin.read().strip()
+ with open(os.path.join(algorand_data, 'algod.token')) as fin:
+ token = fin.read().strip()
+ with open(os.path.join(algorand_data, 'algod.admin.token')) as fin:
+ admin_token = fin.read().strip()
+ return net, token, admin_token
+
+# data from /debug/pprof/* is already gzipped
+
+# curl -o /tmp/algod.pprof.heap "http://`cat ${ALGORAND_DATA}/algod.net`/urlAuth/`cat ${ALGORAND_DATA}/algod.admin.token`/debug/pprof/heap"
+# both reports can be generated from one heap profile snapshot
+# go tool pprof -sample_index=inuse_space -svg -output /tmp/algod.heap.svg /tmp/algod.pprof.heap
+# go tool pprof -sample_index=alloc_space -svg -output /tmp/algod.alloc.svg /tmp/algod.pprof.heap
+
+## curl -o /tmp/algod.pprof.allocs "http://`cat ${ALGORAND_DATA}/algod.net`/urlAuth/`cat ${ALGORAND_DATA}/algod.admin.token`/debug/pprof/allocs"
+# go tool pprof -svg -output /tmp/algod.allocs.svg /tmp/algod.pprof.allocs
+
+# http://localhost:6060/debug/pprof/allocs?debug=1
+
+# -inuse_space Same as -sample_index=inuse_space
+# -inuse_objects Same as -sample_index=inuse_objects
+# -alloc_space Same as -sample_index=alloc_space
+# -alloc_objects Same as -sample_index=alloc_objects
+
+graceful_stop = False
+
+def do_graceful_stop(signum, frame):
+ global graceful_stop
+ if graceful_stop:
+ sys.stderr.write("second signal, quitting\n")
+ sys.exit(1)
+ sys.stderr.write("graceful stop...\n")
+ graceful_stop = True
+
+signal.signal(signal.SIGTERM, do_graceful_stop)
+signal.signal(signal.SIGINT, do_graceful_stop)
+
+
+class algodDir:
+ def __init__(self, path):
+ self.path = path
+ self.nick = os.path.basename(self.path)
+ net, token, admin_token = read_algod_dir(self.path)
+ self.net = net
+ self.token = token
+ self.admin_token = admin_token
+ self.headers = {}
+ self._pid = None
+ self._algod = None
+
+ def pid(self):
+ if self._pid is None:
+ with open(os.path.join(self.path, 'algod.pid')) as fin:
+ self._pid = int(fin.read())
+ return self._pid
+
+ def algod(self):
+ if self._algod is None:
+ net = self.net
+ if not net.startswith('http'):
+ net = 'http://' + net
+ self._algod = algosdk.v2client.algod.AlgodClient(self.token, net, self.headers)
+ return self._algod
+
+ def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None):
+ url = 'http://' + self.net + '/urlAuth/' + self.admin_token + '/debug/pprof/' + name
+ response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers))
+ if response.code != 200:
+ logger.error('could not fetch %s from %s via %r', name, self.path, url)
+ return
+ blob = response.read()
+ if snapshot_name is None:
+ snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime())
+ outpath = os.path.join(outdir or '.', self.nick + '.' + snapshot_name + '.' + name)
+ with open(outpath, 'wb') as fout:
+ fout.write(blob)
+ logger.debug('%s -> %s', self.nick, outpath)
+ return outpath
+
+ def get_heap_snapshot(self, snapshot_name=None, outdir=None):
+ return self.get_pprof_snapshot('heap', snapshot_name, outdir)
+
+ def get_goroutine_snapshot(self, snapshot_name=None, outdir=None):
+ return self.get_pprof_snapshot('goroutine', snapshot_name, outdir)
+
+ def get_metrics(self, snapshot_name=None, outdir=None):
+ url = 'http://' + self.net + '/metrics'
+ response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers))
+ if response.code != 200:
+ logger.error('could not fetch metrics from %s via %r', self.path, url)
+ return
+ blob = response.read()
+ outpath = os.path.join(outdir or '.', self.nick + '.' + snapshot_name + '.metrics')
+ with open(outpath, 'wb') as fout:
+ fout.write(blob)
+ logger.debug('%s -> %s', self.nick, outpath)
+
+ def get_blockinfo(self, snapshot_name=None, outdir=None):
+ algod = self.algod()
+ status = algod.status()
+ bi = algod.block_info(status['last-round'])
+ if snapshot_name is None:
+ snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime())
+ outpath = os.path.join(outdir or '.', self.nick + '.' + snapshot_name + '.blockinfo.json')
+ bi['block'].pop('txns', None)
+ with open(outpath, 'wt') as fout:
+ json.dump(bi, fout)
+ return bi
+ #txncount = bi['block']['tc']
+
+ def psHeap(self):
+ # return rss, vsz
+ # ps -o rss,vsz $(cat ${ALGORAND_DATA}/algod.pid)
+ subp = subprocess.Popen(['ps', '-o', 'rss,vsz', str(self.pid())], stdout=subprocess.PIPE)
+ try:
+ outs, errs = subp.communicate(timeout=2)
+ for line in outs.decode().splitlines():
+ try:
+ rss,vsz = [int(x) for x in line.strip().split()]
+ return rss,vsz
+ except:
+ pass
+ except:
+ return None, None
+
+class watcher:
+ def __init__(self, args):
+ self.args = args
+ self.prevsnapshots = {}
+ self.they = []
+ for path in args.data_dirs:
+ if not os.path.isdir(path):
+ continue
+ if os.path.exists(os.path.join(path, 'algod.net')):
+ try:
+ ad = algodDir(path)
+ self.they.append(ad)
+ except:
+ logger.error('bad algod: %r', path, exc_info=True)
+ else:
+ logger.debug('not a datadir: %r', path)
+ logger.debug('data dirs: %r', self.they)
+
+ def do_snap(self, now):
+ snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime(now))
+ snapshot_isotime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now))
+ logger.debug('begin snapshot %s', snapshot_name)
+ psheaps = {}
+ newsnapshots = {}
+ if self.args.heaps:
+ for ad in self.they:
+ snappath = ad.get_heap_snapshot(snapshot_name, outdir=self.args.out)
+ newsnapshots[ad.path] = snappath
+ rss, vsz = ad.psHeap()
+ if rss and vsz:
+ psheaps[ad.nick] = (rss, vsz)
+ for nick, rssvsz in psheaps.items():
+ rss, vsz = rssvsz
+ with open(os.path.join(self.args.out, nick + '.heap.csv'), 'at') as fout:
+ fout.write('{},{},{},{}\n'.format(snapshot_name,snapshot_isotime,rss, vsz))
+ if self.args.goroutine:
+ for ad in self.they:
+ ad.get_goroutine_snapshot(snapshot_name, outdir=self.args.out)
+ if self.args.metrics:
+ for ad in self.they:
+ ad.get_metrics(snapshot_name, outdir=self.args.out)
+ if self.args.blockinfo:
+ for ad in self.they:
+ ad.get_blockinfo(snapshot_name, outdir=self.args.out)
+ logger.debug('snapped, processing...')
+ # make absolute and differential plots
+ for path, snappath in newsnapshots.items():
+ subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse.svg', snappath])
+ subprocess.call(['go', 'tool', 'pprof', '-sample_index=alloc_space', '-svg', '-output', snappath + '.alloc.svg', snappath])
+ prev = self.prevsnapshots.get(path)
+ if prev:
+ subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse_diff.svg', '-base='+prev, snappath])
+ subprocess.call(['go', 'tool', 'pprof', '-sample_index=alloc_space', '-svg', '-output', snappath + '.alloc_diff.svg', '-diff_base='+prev, snappath])
+ self.prevsnapshots = newsnapshots
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('data_dirs', nargs='*', help='list paths to algorand datadirs to grab heap profile from')
+ ap.add_argument('--no-heap', dest='heaps', default=True, action='store_false', help='disable heap snapshot capture')
+ ap.add_argument('--goroutine', default=False, action='store_true', help='also capture goroutine profile')
+ ap.add_argument('--metrics', default=False, action='store_true', help='also capture /metrics counts')
+ ap.add_argument('--blockinfo', default=False, action='store_true', help='also capture block header info')
+ ap.add_argument('--period', default=None, help='seconds between automatically capturing')
+ ap.add_argument('-o', '--out', default=None, help='directory to write to')
+ ap.add_argument('--verbose', default=False, action='store_true')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ app = watcher(args)
+
+ # get a first snapshot immediately
+ start = time.time()
+ now = start
+
+ app.do_snap(now)
+
+ if args.period:
+ lastc = args.period.lower()[-1:]
+ if lastc == 's':
+ periodSecs = int(args.period[:-1])
+ elif lastc == 'm':
+ periodSecs = int(args.period[:-1]) * 60
+ elif lastc == 'h':
+ periodSecs = int(args.period[:-1]) * 3600
+ else:
+ periodSecs = int(args.period)
+
+ periodi = 1
+ nextt = start + (periodi * periodSecs)
+ while not graceful_stop:
+ while nextt < now:
+ nextt = start + (periodi * periodSecs)
+ while now < nextt - (periodSecs * 0.05):
+ logger.debug('sleep %f', nextt - now)
+ time.sleep(nextt - now)
+ if graceful_stop:
+ return
+ now = time.time()
+ periodi += 1
+ nextt += periodSecs
+ app.do_snap(now)
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py
new file mode 100644
index 000000000..7b1723439
--- /dev/null
+++ b/test/heapwatch/metrics_delta.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+
+import argparse
+import contextlib
+import csv
+import gzip
+import logging
+import json
+import os
+import sys
+import time
+
+logger = logging.getLogger(__name__)
+
+def num(x):
+ if '.' in x:
+ return float(x)
+ return int(x)
+
+def parse_metrics(fin):
+ out = dict()
+ for line in fin:
+ if not line:
+ continue
+ line = line.strip()
+ if not line:
+ continue
+ if line[0] == '#':
+ continue
+ ab = line.split()
+ out[ab[0]] = num(ab[1])
+ return out
+
+# return b-a
+def metrics_delta(a,b):
+ old_unseen = set(a.keys())
+ d = dict()
+ for k,bv in b.items():
+ if k in a:
+ av = a.get(k, 0)
+ d[k] = bv-av
+ old_unseen.remove(k)
+ else:
+ d[k] = bv
+ for k in old_unseen:
+ d[k] = 0-a[k]
+ return d
+
+# slightly smarter open, stdout for '-', auto .gz
+def sopen(path, mode):
+ if path == '-':
+ return contextlib.nullcontext(sys.stdout)
+ if path.endswith('.gz'):
+ return contextlib.closing(gzip.open(path, mode))
+ return open(path, mode)
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('metrics_files', nargs='*')
+ ap.add_argument('--deltas', default=None, help='path to write csv deltas')
+ ap.add_argument('--report', default=None, help='path to write csv report')
+ ap.add_argument('--verbose', default=False, action='store_true')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ reportf = None
+ writer = None
+ if args.report:
+ if args.report == '-':
+ writer = csv.writer(sys.stdout)
+ else:
+ reportf = open(args.report, 'wt')
+ writer = csv.writer(reportf)
+ writer.writerow(('when', 'tx bytes/s', 'rx bytes/s','TPS', 's/block'))
+ prev = None
+ prevtime = None
+ prevPath = None
+ prevbi = None
+
+ deltas = []
+ for path in sorted(args.metrics_files):
+ with open(path, 'rt') as fin:
+ cur = parse_metrics(fin)
+ bijsonpath = path.replace('.metrics', '.blockinfo.json')
+ bi = None
+ if os.path.exists(bijsonpath):
+ with open(bijsonpath, 'rt') as fin:
+ bi = json.load(fin)
+ curtime = os.path.getmtime(path)
+ logger.debug('%s: %r', path, cur)
+ if prev is not None:
+ d = metrics_delta(prev, cur)
+ dt = curtime - prevtime
+ #print("{} ->\n{}".format(prevPath, path))
+ #print(json.dumps(d, indent=2, sort_keys=True))
+ deltas.append((curtime, d))
+ tps = None
+ blocktime = None
+ if bi and prevbi:
+ tps = (bi.get('block',{}).get('tc', 0) - prevbi.get('block',{}).get('tc', 0)) / dt
+ rounds = (bi.get('block',{}).get('rnd', 0) - prevbi.get('block',{}).get('rnd', 0))
+ if rounds != 0:
+ blocktime = dt/rounds
+ if writer:
+ txBytesPerSec = d.get('algod_network_sent_bytes_total{}',0) / dt
+ rxBytesPerSec = d.get('algod_network_received_bytes_total{}',0) /dt
+ writer.writerow((
+ time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(curtime)),
+ txBytesPerSec,
+ rxBytesPerSec,
+ tps,
+ blocktime,
+ ))
+ prev = cur
+ prevPath = path
+ prevtime = curtime
+ prevbi = bi
+ if reportf:
+ reportf.close()
+ if deltas and args.deltas:
+ keys = set()
+ for ct, d in deltas:
+ keys.update(set(d.keys()))
+ keys = sorted(keys)
+ with sopen(args.deltas, 'wt') as fout:
+ writer = csv.writer(fout)
+ writer.writerow(['when'] + keys)
+ for ct, d in deltas:
+ row = [time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ct))]
+ for k in keys:
+ row.append(d.get(k, None))
+ writer.writerow(row)
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh
new file mode 100755
index 000000000..cb4b37eca
--- /dev/null
+++ b/test/heapwatch/start.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -e
+set -o pipefail
+set -x
+export SHELLOPTS
+
+TESTDIR=$1
+if [ -z "${TESTDIR}" ]; then
+ TESTDIR=/tmp/heap_testnetwork
+fi
+
+REPO_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"/../..
+
+goal network create -r "${TESTDIR}" -t "${REPO_ROOT}/test/testdata/nettemplates/ThreeNodesEvenDist.json" -n tbd
+
+goal network start -r "${TESTDIR}"
+
+# give all the algod a moment...
+sleep 2
+
+mkdir -p "${TESTDIR}/heaps"
+python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --period 10m --metrics --blockinfo "${TESTDIR}/"* &
+
+echo "$!" > .heapWatch.pid
+
+# TODO: other pingpong modes
+pingpong run -d "${TESTDIR}/Node1" --tps 10 --rest 0 --run 0 --nftasapersecond 200 &
+
+echo "$!" > .pingpong1.pid
+
+pingpong run -d "${TESTDIR}/Node2" --tps 10 --rest 0 --run 0 --nftasapersecond 200 &
+
+echo "$!" > .pingpong2.pid
diff --git a/test/heapwatch/stop.sh b/test/heapwatch/stop.sh
new file mode 100755
index 000000000..b5066a89a
--- /dev/null
+++ b/test/heapwatch/stop.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# clean up what was started by start.sh or bwstart.sh
+
+set -e
+set -o pipefail
+set -x
+export SHELLOPTS
+
+if [ -f .heapWatch.pid ]; then
+ kill $(cat .heapWatch.pid) || true
+fi
+
+for i in .pingpong*.pid; do
+ kill $(cat $i) || true
+ rm -f "${i}"
+done
+
+TESTDIR=$1
+if [ -z "${TESTDIR}" ]; then
+ TESTDIR=/tmp/heap_testnetwork
+fi
+if [ -d "${TESTDIR}/net" ]; then
+ # started with bwstart.sh
+ TESTDIR="${TESTDIR}/net"
+fi
+
+goal network stop -r "${TESTDIR}"
+
+if [ -f .heapWatch.pid ]; then
+ kill -9 $(cat .heapWatch.pid) || true
+ rm -f .heapWatch.pid
+fi
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index 2ec92c2b8..e12ab620d 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -45,6 +45,20 @@ TEST_RUN_ID=$(${SCRIPT_PATH}/testrunid.py)
export TEMPDIR=${SRCROOT}/tmp/out/e2e/${TEST_RUN_ID}
echo "Test output can be found in ${TEMPDIR}"
+
+# ARM64 has an unoptimized scrypt() which can cause tests to time out.
+# On those platforms we launch kmd with unsafe_scrypt = true, which configures
+# scrypt() to run fast (and less securely) so the tests can go through the motions.
+RUN_KMD_WITH_UNSAFE_SCRYPT=""
+PLATFORM_ARCHTYPE=$("${SRCROOT}/scripts/archtype.sh")
+
+echo "ARCHTYPE: ${PLATFORM_ARCHTYPE}"
+if [[ "${PLATFORM_ARCHTYPE}" = arm* ]]; then
+ RUN_KMD_WITH_UNSAFE_SCRYPT="--unsafe_scrypt"
+fi
+
+echo "RUN_KMD_WITH_UNSAFE_SCRYPT = ${RUN_KMD_WITH_UNSAFE_SCRYPT}"
+
export BINDIR=${TEMPDIR}/bin
export DATADIR=${TEMPDIR}/data
@@ -85,9 +99,12 @@ python3 -m venv "${TEMPDIR}/ve"
. "${TEMPDIR}/ve/bin/activate"
"${TEMPDIR}/ve/bin/pip3" install --upgrade pip
"${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography
-"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py "$SRCROOT"/test/scripts/e2e_subs/*.sh
+"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh
for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py --version "$(basename "$vdir")" "$vdir"/*.sh
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh
+done
+for script in "$SRCROOT"/test/scripts/e2e_subs/serial/*; do
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} $script
done
deactivate
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index 7b1d2d069..7a28cddd2 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -69,6 +69,33 @@ def read_script_for_timeout(fname):
logger.debug('read timeout match err', exc_info=True)
return 200
+
+def create_kmd_config_with_unsafe_scrypt(working_dir):
+
+ kmd_config_dir = os.path.join(working_dir,"kmd-v0.5")
+ with open(os.path.join(kmd_config_dir,"kmd_config.json.example")) as f:
+ kmd_conf_data = json.load(f)
+ if "drivers" not in kmd_conf_data:
+ raise Exception("kmd_conf example does not contian drivers attribute")
+ if "sqlite" not in kmd_conf_data["drivers"]:
+ raise Exception("kmd_conf example does not contian sqlite attribute")
+ if "allow_unsafe_scrypt" not in kmd_conf_data["drivers"]["sqlite"]:
+ raise Exception("kmd_conf example does not contian allow_unsafe_scrypt attribute")
+ if "scrypt" not in kmd_conf_data["drivers"]["sqlite"]:
+ raise Exception("kmd_conf example does not contian scrypt attribute")
+ if "scrypt_n" not in kmd_conf_data["drivers"]["sqlite"]["scrypt"]:
+ raise Exception("kmd_conf example does not contian scrypt_n attribute")
+ if "scrypt_r" not in kmd_conf_data["drivers"]["sqlite"]["scrypt"]:
+ raise Exception("kmd_conf example does not contian scrypt_r attribute")
+
+ kmd_conf_data["drivers"]["sqlite"]["allow_unsafe_scrypt"] = True
+ kmd_conf_data["drivers"]["sqlite"]["scrypt"]["scrypt_n"] = 4096
+ with open(os.path.join(kmd_config_dir,"kmd_config.json"),"w") as f:
+ json.dump(kmd_conf_data,f)
+
+
+
+
def _script_thread_inner(runset, scriptname):
start = time.time()
algod, kmd = runset.connect()
@@ -118,6 +145,13 @@ def _script_thread_inner(runset, scriptname):
sys.stderr.write('{}\n'.format(te))
retcode = -1
dt = time.time() - start
+
+
+ if runset.terminated:
+ logger.info('Program terminated before %s finishes.', scriptname)
+ runset.done(scriptname, False, dt)
+ return
+
if retcode != 0:
with runset.lock:
logger.error('%s failed in %f seconds', scriptname, dt)
@@ -183,8 +217,11 @@ class RunSet:
def _connect(self):
if self.algod and self.kmd:
return
+
+
# should run from inside self.lock
algodata = self.env['ALGORAND_DATA']
+
xrun(['goal', 'kmd', 'start', '-t', '3600','-d', algodata], env=self.env, timeout=5)
self.kmd = openkmd(algodata)
self.algod = openalgod(algodata)
@@ -367,6 +404,8 @@ def main():
ap.add_argument('--timeout', default=500, type=int, help='integer seconds to wait for the scripts to run')
ap.add_argument('--verbose', default=False, action='store_true')
ap.add_argument('--version', default="Future")
+ ap.add_argument('--unsafe_scrypt', default=False, action='store_true', help="allows kmd to run with unsafe scrypt attribute. This will speed up tests time")
+
args = ap.parse_args()
if args.verbose:
@@ -401,6 +440,11 @@ def main():
env['ALGORAND_DATA'] = os.path.join(netdir, 'Node')
env['ALGORAND_DATA2'] = os.path.join(netdir, 'Primary')
+ if args.unsafe_scrypt:
+ create_kmd_config_with_unsafe_scrypt(env['ALGORAND_DATA'])
+ create_kmd_config_with_unsafe_scrypt(env['ALGORAND_DATA2'])
+
+
xrun(['goal', '-v'], env=env, timeout=5)
xrun(['goal', 'node', 'status'], env=env, timeout=5)
diff --git a/test/scripts/e2e_go_tests.sh b/test/scripts/e2e_go_tests.sh
index 076f20601..0802b1f4e 100755
--- a/test/scripts/e2e_go_tests.sh
+++ b/test/scripts/e2e_go_tests.sh
@@ -3,6 +3,7 @@ echo "######################################################################"
echo " e2e_go_tests"
echo "######################################################################"
set -e
+set -o pipefail
export GOPATH=$(go env GOPATH)
export GO111MODULE=on
@@ -95,14 +96,14 @@ if [ "${#TESTPATTERNS[@]}" -eq 0 ]; then
for TEST_DIR in ${TESTS_DIRECTORIES[@]}; do
TESTS=$(go test -list ".*" ${TEST_DIR} -vet=off | grep -v "github.com" || true)
for TEST_NAME in ${TESTS[@]}; do
- go test ${RACE_OPTION} -timeout 1h -vet=off -v ${SHORTTEST} -run ${TEST_NAME} ${TEST_DIR}
- KMD_INSTANCES_COUNT=$(ps -Af | grep kmd | grep -v "grep" | wc -l | tr -d ' ')
+ go test ${RACE_OPTION} -timeout 1h -vet=off -v ${SHORTTEST} -run ${TEST_NAME} ${TEST_DIR} | logfilter
+ KMD_INSTANCES_COUNT=$(set +o pipefail; ps -Af | grep kmd | grep -v "grep" | wc -l | tr -d ' ')
if [ "${KMD_INSTANCES_COUNT}" != "0" ]; then
echo "One or more than one KMD instances remains running:"
ps -Af | grep kmd | grep -v "grep"
exit 1
fi
- ALGOD_INSTANCES_COUNT=$(ps -Af | grep algod | grep -v "grep" | wc -l | tr -d ' ')
+ ALGOD_INSTANCES_COUNT=$(set +o pipefail; ps -Af | grep algod | grep -v "grep" | wc -l | tr -d ' ')
if [ "${ALGOD_INSTANCES_COUNT}" != "0" ]; then
echo "One or more than one algod instances remains running:"
ps -Af | grep algod | grep -v "grep"
@@ -111,11 +112,11 @@ if [ "${#TESTPATTERNS[@]}" -eq 0 ]; then
done
done
else
- go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} ./...
+ go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} ./... | logfilter
fi
else
for TEST in ${TESTPATTERNS[@]}; do
- go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} -run ${TEST} ./...
+ go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} -run ${TEST} ./... | logfilter
done
fi
diff --git a/test/scripts/e2e_subs/rest-applications-endpoint.sh b/test/scripts/e2e_subs/rest-applications-endpoint.sh
new file mode 100755
index 000000000..6d66e5798
--- /dev/null
+++ b/test/scripts/e2e_subs/rest-applications-endpoint.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+my_dir="$(dirname "$0")"
+#"$my_dir/rest.sh" "$@"
+source "$my_dir/rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Create an application
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 2 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+# Good request, non-existent app id
+call_and_verify "Should not find app." "/v2/applications/987654321" 404 'application does not exist'
+# Good request
+call_and_verify "Should contain app data." "/v2/applications/$APPID" 200 '"global-state-schema":{"num-byte-slice":0,"num-uint":2}'
+# Good request, pretty response
+call_and_verify "Should contain app data." "/v2/applications/$APPID?pretty" 200 '
+ "global-state-schema": {
+ "num-byte-slice": 0,
+ "num-uint": 2
+ },
+ "local-state-schema": {
+ "num-byte-slice": 0,
+ "num-uint": 0
+ }
+ '
+# Some invalid path parameters
+call_and_verify "App parameter parsing error 1." "/v2/applications/-2" 400 "Invalid format for parameter application-id"
+call_and_verify "App parameter parsing error 2." "/v2/applications/not-a-number" 400 "Invalid format for parameter application-id"
+
+# Good request, but invalid query parameters
+call_and_verify "App invalid parameter" "/v2/applications/$APPID?this-should-fail=200" 400 'Unknown parameter detected: this-should-fail'
+
diff --git a/test/scripts/e2e_subs/rest-assets-endpoint.sh b/test/scripts/e2e_subs/rest-assets-endpoint.sh
new file mode 100755
index 000000000..fc6ca6e80
--- /dev/null
+++ b/test/scripts/e2e_subs/rest-assets-endpoint.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+my_dir="$(dirname "$0")"
+#"$my_dir/rest.sh" "$@"
+source "$my_dir/rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+ASSET_ID=$(${gcmd} asset create --creator "${ACCOUNT}" --total 10000 --decimals 19 --name "spanish coin" --unitname "doubloon" | grep "Created asset with asset index" | rev | cut -d ' ' -f 1 | rev)
+
+# Good request, non-existent asset id
+call_and_verify "Should not find asset." "/v2/assets/987654321" 404 'asset does not exist'
+# Good request
+call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID" 200 '","decimals":19,"default-frozen":false,"freeze":"'
+# Good request, pretty response
+call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID?pretty" 200 '
+ "decimals": 19,
+ "default-frozen": false,
+ "freeze": "'
+# Some invalid path parameters
+call_and_verify "Asset parameter parsing error 1." "/v2/assets/-2" 400 "Invalid format for parameter asset-id"
+call_and_verify "Asset parameter parsing error 2." "/v2/assets/not-a-number" 400 "Invalid format for parameter asset-id"
+
+# Good request, but invalid query parameters
+call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-fail=200" 400 'parameter detected: this-should-fail'
+
diff --git a/test/scripts/e2e_subs/rest-genesis-endpoint.sh b/test/scripts/e2e_subs/rest-genesis-endpoint.sh
new file mode 100755
index 000000000..e87d172bf
--- /dev/null
+++ b/test/scripts/e2e_subs/rest-genesis-endpoint.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+my_dir="$(dirname "$0")"
+#"$my_dir/rest.sh" "$@"
+source "$my_dir/rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+call_and_verify "There should be a genesis endpoint." "/genesis" 200 '
+ "id": "v1",
+ "network": "tbd",
+ "proto": "future",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}'
diff --git a/test/scripts/e2e_subs/rest-pprof.sh b/test/scripts/e2e_subs/rest-pprof.sh
new file mode 100755
index 000000000..2fe976191
--- /dev/null
+++ b/test/scripts/e2e_subs/rest-pprof.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+my_dir="$(dirname "$0")"
+#"$my_dir/rest.sh" "$@"
+source "$my_dir/rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# URL Auth - valid
+CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/$PRIMARY_ADMIN_TOKEN/debug/pprof/block")
+if [[ "$CODE" != "200" ]]; then
+ fail_and_exit "Call pprof with valid token" "/urlAuth/:token/debug/pprof" "Invalid exit code expected 200 (actual $CODE)"
+fi
+
+# URL Auth - invalid
+CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/invalid_token/debug/pprof/block")
+if [[ "$CODE" != "401" ]]; then
+ fail_and_exit "Call pprof with invalid token" "/urlAuth/invalid_token/debug/pprof" "Invalid exit code expected 401 (actual $CODE)"
+fi
+
+# Header Auth - valid
+CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer $PRIMARY_ADMIN_TOKEN")
+if [[ "$CODE" != "200" ]]; then
+ fail_and_exit "Call pprof with valid token" "/debug/pprof" "Invalid exit code expected 200 (actual $CODE)"
+fi
+
+# Header Auth - invalid
+CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer invalid_token")
+if [[ "$CODE" != "401" ]]; then
+ fail_and_exit "Call pprof with invalid token" "/debug/pprof" "Invalid exit code expected 401 (actual $CODE)"
+fi
diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh
index 3f915406b..84b9ed570 100755
--- a/test/scripts/e2e_subs/rest.sh
+++ b/test/scripts/e2e_subs/rest.sh
@@ -1,7 +1,17 @@
#!/usr/bin/env bash
-# TIMEOUT=300
+# TIMEOUT=50
-date '+rest.sh start %Y%m%d_%H%M%S'
+# Helpers for REST API tests.
+# Use the following boilerplate code at the top of new REST tests:
+
+# #!/usr/bin/env bash
+# # TIMEOUT=300
+#
+# my_dir="$(dirname "$0")"
+# #"$my_dir/rest.sh" "$@"
+# source "$my_dir/rest.sh" "$@"
+#
+# date "+$0 start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -63,96 +73,3 @@ function call_and_verify {
fail_and_exit "$1" "$2" "unexpected response. should contain '$4', actual: $RES"
fi
}
-
-
-function test_applications_endpoint {
- # Create an application
- printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
- APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 2 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
-
- # Good request, non-existant app id
- call_and_verify "Should not find app." "/v2/applications/987654321" 404 'application does not exist'
- # Good request
- call_and_verify "Should contain app data." "/v2/applications/$APPID" 200 '"global-state-schema":{"num-byte-slice":0,"num-uint":2}'
- # Good request, pretty response
- call_and_verify "Should contain app data." "/v2/applications/$APPID?pretty" 200 '
- "global-state-schema": {
- "num-byte-slice": 0,
- "num-uint": 2
- },
- "local-state-schema": {
- "num-byte-slice": 0,
- "num-uint": 0
- }
-'
- # Some invalid path parameters
- call_and_verify "App parameter parsing error 1." "/v2/applications/-2" 400 "Invalid format for parameter application-id"
- call_and_verify "App parameter parsing error 2." "/v2/applications/not-a-number" 400 "Invalid format for parameter application-id"
-
- # Good request, but invalid query parameters
- call_and_verify "App invalid parameter" "/v2/applications/$APPID?this-should-fail=200" 400 'Unknown parameter detected: this-should-fail'
-}
-
-
-function test_assets_endpoint {
- local ASSET_ID
- ASSET_ID=$(${gcmd} asset create --creator "${ACCOUNT}" --total 10000 --decimals 19 --name "spanish coin" --unitname "doubloon" | grep "Created asset with asset index" | rev | cut -d ' ' -f 1 | rev)
-
- # Good request, non-existant asset id
- call_and_verify "Should not find asset." "/v2/assets/987654321" 404 'asset does not exist'
- # Good request
- call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID" 200 '","decimals":19,"default-frozen":false,"freeze":"'
- # Good request, pretty response
- call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID?pretty" 200 '
- "decimals": 19,
- "default-frozen": false,
- "freeze": "'
- # Some invalid path parameters
- call_and_verify "Asset parameter parsing error 1." "/v2/assets/-2" 400 "Invalid format for parameter asset-id"
- call_and_verify "Asset parameter parsing error 2." "/v2/assets/not-a-number" 400 "Invalid format for parameter asset-id"
-
- # Good request, but invalid query parameters
- call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-fail=200" 400 'parameter detected: this-should-fail'
-}
-
-function pprof_test {
- # URL Auth - valid
- CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/$PRIMARY_ADMIN_TOKEN/debug/pprof/block")
- if [[ "$CODE" != "200" ]]; then
- fail_and_exit "Call pprof with valid token" "/urlAuth/:token/debug/pprof" "Invalid exit code expected 200 (actual $CODE)"
- fi
-
- # URL Auth - invalid
- CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/invalid_token/debug/pprof/block")
- if [[ "$CODE" != "401" ]]; then
- fail_and_exit "Call pprof with invalid token" "/urlAuth/invalid_token/debug/pprof" "Invalid exit code expected 401 (actual $CODE)"
- fi
-
- # Header Auth - valid
- CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer $PRIMARY_ADMIN_TOKEN")
- if [[ "$CODE" != "200" ]]; then
- fail_and_exit "Call pprof with valid token" "/debug/pprof" "Invalid exit code expected 200 (actual $CODE)"
- fi
-
- # Header Auth - invalid
- CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer invalid_token")
- if [[ "$CODE" != "401" ]]; then
- fail_and_exit "Call pprof with invalid token" "/debug/pprof" "Invalid exit code expected 401 (actual $CODE)"
- fi
-}
-
-function test_genesis_endpoint {
- call_and_verify "There should be a genesis endpoint." "/genesis" 200 '
- "id": "v1",
- "network": "tbd",
- "proto": "future",
- "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
-}'
-}
-
-
-# Run the tests.
-test_applications_endpoint
-test_assets_endpoint
-pprof_test
-test_genesis_endpoint
diff --git a/test/scripts/e2e_subs/serial/rest-proof-endpoint.sh b/test/scripts/e2e_subs/serial/rest-proof-endpoint.sh
new file mode 100755
index 000000000..ba87561ef
--- /dev/null
+++ b/test/scripts/e2e_subs/serial/rest-proof-endpoint.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+# TIMEOUT=60
+
+my_dir="$(dirname "$0")"
+#"$my_dir/rest.sh" "$@"
+source "$my_dir/../rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+NUM_TRANSACTIONS=0
+
+# Create a transaction with no siblings
+while [[ "${NUM_TRANSACTIONS}" != "1" ]]; do
+ SEND_OUTPUT=$(${gcmd} clerk send -a 0 -f "${ACCOUNT}" -t "${ACCOUNT}")
+ TXID=$(echo "$SEND_OUTPUT" | head -n 1 | sed 's/.*transaction ID: \([^.]*\).*/\1/')
+ ROUND=$(echo "$SEND_OUTPUT" | tail -n 1 | sed 's/.*committed in round \([[:digit:]]*\).*/\1/')
+
+ # check if the transaction was all alone in the round
+ call_and_verify "Checking block" "/v2/blocks/${ROUND}" 200 'txns'
+ #TODO: The check with jq can be re-enabled after fixing JSONStrictHandle.
+ #NUM_TRANSACTIONS=$(cat "${TEMPDIR}/curl_out.txt" | jq '.block.txns | length')
+ NUM_TRANSACTIONS=$(cat "${TEMPDIR}/curl_out.txt" | grep type | wc -l | tr -d ' ')
+done
+
+call_and_verify "The proof should not be null." "/v2/blocks/${ROUND}/transactions/${TXID}/proof" 200 '"proof":""'
diff --git a/test/scripts/e2e_subs/v26/teal-v3-only.sh b/test/scripts/e2e_subs/v26/teal-v3-only.sh
new file mode 100755
index 000000000..ee3fe7d86
--- /dev/null
+++ b/test/scripts/e2e_subs/v26/teal-v3-only.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+date '+teal-v3-only start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+# prints:
+# Created new account with address UCTHHNBEAUWHDQWQI5DGQCTB7AR4CSVNU5YNPROAYQIT3Y3LKVDFAA5M6Q
+ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
+
+cat >${TEMPDIR}/true.teal<<EOF
+#pragma version 3
+int 1
+EOF
+
+${gcmd} clerk compile -o ${TEMPDIR}/true.lsig -s -a ${ACCOUNT} ${TEMPDIR}/true.teal
+
+${gcmd} clerk send -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000000 -L ${TEMPDIR}/true.lsig
+
+${gcmd} clerk send -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000000 -o ${TEMPDIR}/one.tx
+
+${gcmd} clerk sign -L ${TEMPDIR}/true.lsig -i ${TEMPDIR}/one.tx -o ${TEMPDIR}/one.stx
+
+${gcmd} clerk rawsend -f ${TEMPDIR}/one.stx
+
+${gcmd} clerk dryrun -t ${TEMPDIR}/one.stx
+
+ACCOUNT_TRUE=$(${gcmd} clerk compile -n ${TEMPDIR}/true.teal|awk '{ print $2 }')
+
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+
+${gcmd} clerk send --amount 10 --from-program ${TEMPDIR}/true.teal --to ${ACCOUNTB}
+
+${gcmd} clerk send --amount 10 --from ${ACCOUNT_TRUE} --to ${ACCOUNTB} -o ${TEMPDIR}/true.tx
+
+${gcmd} clerk sign -i ${TEMPDIR}/true.tx -o ${TEMPDIR}/true.stx --program ${TEMPDIR}/true.teal
+
+${gcmd} clerk rawsend -f ${TEMPDIR}/true.stx
+
+${gcmd} clerk inspect ${TEMPDIR}/true.stx
+
+${gcmd} clerk compile -D ${TEMPDIR}/true.lsig
+
+echo "#pragma version 1" | ${gcmd} clerk compile -
+echo "#pragma version 2" | ${gcmd} clerk compile -
+echo "#pragma version 3" | ${gcmd} clerk compile -
+
+
+
+set +o pipefail
+# v4 opcodes with v3 pragma fails
+printf "#pragma version 3\nbegin: int 1\nb begin" | ${gcmd} clerk compile - 2>&1 | grep "back jump support"
+set -o pipefail
+
+# Although we are in an earlier version, v4 can be compiled, it just can't be used.
+cat >${TEMPDIR}/true4.teal<<EOF
+#pragma version 4
+int 1
+EOF
+
+
+ACCOUNT_TRUE=$(${gcmd} clerk compile -n ${TEMPDIR}/true4.teal|awk '{ print $2 }')
+
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+
+set +o pipefail
+${gcmd} clerk send --amount 10 --from-program ${TEMPDIR}/true4.teal --to ${ACCOUNTB} 2>&1 | grep "LogicSig.Logic version too new"
+set -o pipefail
+
+
+# Now, ensure it still fails, even if using the v3 program, if the
+# retsub opcode is added. (That is, failure based on opcode choice,
+# not just on the version marker.)
+
+${gcmd} clerk compile ${TEMPDIR}/true.teal -o ${TEMPDIR}/true.lsig
+# append "retsub" opcode to the true program (won't execute the opcode, but presence should cause fail)
+# we can't assemble this, because it would be rejected
+(cat ${TEMPDIR}/true.lsig; printf '\x89') > ${TEMPDIR}/retsub.lsig
+# compute the escrow account for the retsub program
+ACCOUNT_TRUE=$(python -c 'import algosdk, sys; print(algosdk.logic.address(sys.stdin.buffer.read()))' < ${TEMPDIR}/retsub.lsig)
+# fund that escrow account
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+# try, and fail, to lsig with the retsub program
+set +o pipefail
+${gcmd} clerk send --amount 10 --from-program-bytes ${TEMPDIR}/retsub.lsig --to ${ACCOUNTB} 2>&1 | grep "illegal opcode"
+set -o pipefail
+
+
+
+date '+teal-v3-only OK %Y%m%d_%H%M%S'
diff --git a/test/testdata/configs/config-v16.json b/test/testdata/configs/config-v16.json
new file mode 100644
index 000000000..42b6361bd
--- /dev/null
+++ b/test/testdata/configs/config-v16.json
@@ -0,0 +1,92 @@
+{
+ "Version": 16,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 10000,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 30000
+}
diff --git a/test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json b/test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json
new file mode 100644
index 000000000..2070f9c93
--- /dev/null
+++ b/test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json
@@ -0,0 +1,8 @@
+{
+ "numRounds":65000,
+ "roundTransactionsCount": 1000,
+ "generatedAccountsCount": 7000000,
+ "generatedAssetsCount": 200000,
+ "generatedApplicationCount": 1000000,
+ "sourceWalletName": "wallet1",
+}
diff --git a/test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json b/test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json
new file mode 100644
index 000000000..4a2f0f3dc
--- /dev/null
+++ b/test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json
@@ -0,0 +1,8 @@
+{
+ "numRounds":65000,
+ "roundTransactionsCount": 1000,
+ "generatedAccountsCount": 7000000,
+ "generatedAssetsCount": 200000,
+ "generatedApplicationCount": 1000000,
+ "sourceWalletName": "wallet1"
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
new file mode 100644
index 000000000..ee48014b6
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
@@ -0,0 +1,16 @@
+PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+FILEPARAMS=--rounds 5000 --ntrx 1000 --naccounts 3000000 --nassets 20000 --napps 20000 --wallet-name "wallet1"
+
+all: net.json genesis.json boostrappedFile.json
+
+net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: ${GOPATH}/bin/netgoal
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+boostrappedFile.json: ${GOPATH}/bin/netgoal
+ netgoal generate -t loadingFile -r /tmp/wat -o boostrappedFile.json ${FILEPARAMS}
+
+clean:
+	rm -f net.json genesis.json boostrappedFile.json
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
new file mode 100644
index 000000000..7e63535ff
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
@@ -0,0 +1,8 @@
+{
+ "numRounds":65000,
+ "roundTransactionsCount": 1000,
+ "generatedAccountsCount": 7000000,
+ "generatedAssetsCount": 200000,
+ "generatedApplicationCount": 1000000,
+ "sourceWalletName": "wallet1"
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py
new file mode 100644
index 000000000..69e156293
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py
@@ -0,0 +1,27 @@
+node_types = {"R":8, "N":20, "NPN":10}
+node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"}
+regions = [
+ "AWS-US-EAST-2"
+]
+
+f = open("topology.json", "w")
+f.write("{ \"Hosts\":\n [")
+
+region_count = len(regions)
+first = True
+for x in node_types:
+ node_type = x
+ node_count = node_types[x]
+ region_size = node_size[x]
+ for i in range(node_count):
+ node_name = node_type + str(i+1)
+ region = regions[i%region_count]
+ if (first ):
+ first = False
+ else:
+ f.write(",")
+f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Template\": \"" + region + region_size + "\"\n }" )
+
+f.write("\n ]\n}\n")
+f.close()
+
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
new file mode 100644
index 000000000..77a347e27
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
@@ -0,0 +1,1013 @@
+{
+ "NetworkName": "",
+ "VersionModifier": "",
+ "ConsensusProtocol": "future",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 3000000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet31",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet32",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet33",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet34",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet35",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet36",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet37",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet38",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet39",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet40",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet41",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet42",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet43",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet44",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet45",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet46",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet47",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet48",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet49",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet50",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet51",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet52",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet53",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet54",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet55",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet56",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet57",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet58",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet59",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet60",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet61",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet62",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet63",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet64",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet65",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet66",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet67",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet68",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet69",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet70",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet71",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet72",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet73",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet74",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet75",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet76",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet77",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet78",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet79",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet80",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet81",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet82",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet83",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet84",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet85",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet86",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet87",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet88",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet89",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet90",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet91",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet92",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet93",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet94",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet95",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet96",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet97",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet98",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet99",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet100",
+ "Stake": 0.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet101",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet102",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet103",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet104",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet105",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet106",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet107",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet108",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet109",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet110",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet111",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet112",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet113",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet114",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet115",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet116",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet117",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet118",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet119",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet120",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet121",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet122",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet123",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet124",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet125",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet126",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet127",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet128",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet129",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet130",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet131",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet132",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet133",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet134",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet135",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet136",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet137",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet138",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet139",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet140",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet141",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet142",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet143",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet144",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet145",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet146",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet147",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet148",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet149",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet150",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet151",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet152",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet153",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet154",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet155",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet156",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet157",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet158",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet159",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet160",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet161",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet162",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet163",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet164",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet165",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet166",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet167",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet168",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet169",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet170",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet171",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet172",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet173",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet174",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet175",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet176",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet177",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet178",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet179",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet180",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet181",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet182",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet183",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet184",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet185",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet186",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet187",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet188",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet189",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet190",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet191",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet192",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet193",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet194",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet195",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet196",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet197",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet198",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet199",
+ "Stake": 0.5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet200",
+ "Stake": 0.5,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
new file mode 100644
index 000000000..b971cdce5
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
@@ -0,0 +1,2564 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay6",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay7",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay8",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node21",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node41",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node61",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node81",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node22",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node42",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node62",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node82",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node23",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node43",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node63",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node83",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node24",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node44",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node64",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node84",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet21",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node25",
+ "Wallets": [
+ {
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node45",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node65",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node85",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node26",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node46",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node66",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node86",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet31",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node27",
+ "Wallets": [
+ {
+ "Name": "Wallet32",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node47",
+ "Wallets": [
+ {
+ "Name": "Wallet33",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node67",
+ "Wallets": [
+ {
+ "Name": "Wallet34",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node87",
+ "Wallets": [
+ {
+ "Name": "Wallet35",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet36",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node28",
+ "Wallets": [
+ {
+ "Name": "Wallet37",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node48",
+ "Wallets": [
+ {
+ "Name": "Wallet38",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node68",
+ "Wallets": [
+ {
+ "Name": "Wallet39",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node88",
+ "Wallets": [
+ {
+ "Name": "Wallet40",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet41",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node29",
+ "Wallets": [
+ {
+ "Name": "Wallet42",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node49",
+ "Wallets": [
+ {
+ "Name": "Wallet43",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node69",
+ "Wallets": [
+ {
+ "Name": "Wallet44",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node89",
+ "Wallets": [
+ {
+ "Name": "Wallet45",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet46",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node30",
+ "Wallets": [
+ {
+ "Name": "Wallet47",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node50",
+ "Wallets": [
+ {
+ "Name": "Wallet48",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node70",
+ "Wallets": [
+ {
+ "Name": "Wallet49",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node90",
+ "Wallets": [
+ {
+ "Name": "Wallet50",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet51",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node31",
+ "Wallets": [
+ {
+ "Name": "Wallet52",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node51",
+ "Wallets": [
+ {
+ "Name": "Wallet53",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node71",
+ "Wallets": [
+ {
+ "Name": "Wallet54",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node91",
+ "Wallets": [
+ {
+ "Name": "Wallet55",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet56",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node32",
+ "Wallets": [
+ {
+ "Name": "Wallet57",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node52",
+ "Wallets": [
+ {
+ "Name": "Wallet58",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node72",
+ "Wallets": [
+ {
+ "Name": "Wallet59",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node92",
+ "Wallets": [
+ {
+ "Name": "Wallet60",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet61",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node33",
+ "Wallets": [
+ {
+ "Name": "Wallet62",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node53",
+ "Wallets": [
+ {
+ "Name": "Wallet63",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node73",
+ "Wallets": [
+ {
+ "Name": "Wallet64",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node93",
+ "Wallets": [
+ {
+ "Name": "Wallet65",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet66",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node34",
+ "Wallets": [
+ {
+ "Name": "Wallet67",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node54",
+ "Wallets": [
+ {
+ "Name": "Wallet68",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node74",
+ "Wallets": [
+ {
+ "Name": "Wallet69",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node94",
+ "Wallets": [
+ {
+ "Name": "Wallet70",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet71",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node35",
+ "Wallets": [
+ {
+ "Name": "Wallet72",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node55",
+ "Wallets": [
+ {
+ "Name": "Wallet73",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node75",
+ "Wallets": [
+ {
+ "Name": "Wallet74",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node95",
+ "Wallets": [
+ {
+ "Name": "Wallet75",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet76",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node36",
+ "Wallets": [
+ {
+ "Name": "Wallet77",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node56",
+ "Wallets": [
+ {
+ "Name": "Wallet78",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node76",
+ "Wallets": [
+ {
+ "Name": "Wallet79",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node96",
+ "Wallets": [
+ {
+ "Name": "Wallet80",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet81",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node37",
+ "Wallets": [
+ {
+ "Name": "Wallet82",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node57",
+ "Wallets": [
+ {
+ "Name": "Wallet83",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node77",
+ "Wallets": [
+ {
+ "Name": "Wallet84",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node97",
+ "Wallets": [
+ {
+ "Name": "Wallet85",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet86",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node38",
+ "Wallets": [
+ {
+ "Name": "Wallet87",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node58",
+ "Wallets": [
+ {
+ "Name": "Wallet88",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node78",
+ "Wallets": [
+ {
+ "Name": "Wallet89",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node98",
+ "Wallets": [
+ {
+ "Name": "Wallet90",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N19",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet91",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node39",
+ "Wallets": [
+ {
+ "Name": "Wallet92",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node59",
+ "Wallets": [
+ {
+ "Name": "Wallet93",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node79",
+ "Wallets": [
+ {
+ "Name": "Wallet94",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node99",
+ "Wallets": [
+ {
+ "Name": "Wallet95",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet96",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node40",
+ "Wallets": [
+ {
+ "Name": "Wallet97",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node60",
+ "Wallets": [
+ {
+ "Name": "Wallet98",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node80",
+ "Wallets": [
+ {
+ "Name": "Wallet99",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ },
+ {
+ "Name": "node100",
+ "Wallets": [
+ {
+ "Name": "Wallet100",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet101",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet111",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet121",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet131",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet141",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet151",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet161",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet171",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet181",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet191",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet102",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet112",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet122",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet132",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet142",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet152",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet162",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet172",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet182",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet192",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet103",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet113",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet123",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet133",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet143",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet153",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet163",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet173",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet183",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet193",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet104",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet114",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet124",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet134",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet144",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet154",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet164",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet174",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet184",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet194",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet105",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet115",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet125",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet135",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet145",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet155",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet165",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet175",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet185",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet195",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet106",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet116",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet126",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet136",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet146",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet156",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet166",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet176",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet186",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet196",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet107",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet117",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet127",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet137",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet147",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet157",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet167",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet177",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet187",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet197",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet108",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet118",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet128",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet138",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet148",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet158",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet168",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet178",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet188",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet198",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet109",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet119",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet129",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet139",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet149",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet159",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet169",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet179",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet189",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet199",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet110",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet120",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet130",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet140",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet150",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet160",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet170",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet180",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet190",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet200",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
new file mode 100644
index 000000000..412f2937a
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
@@ -0,0 +1,22 @@
+{
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": false,
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
+ "AltConfigs": [
+ {
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }",
+ "FractionApply": 0.2
+ }
+ ]
+}
+
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json
new file mode 100644
index 000000000..8ab3b8bdd
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json
new file mode 100644
index 000000000..766328dbb
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json
@@ -0,0 +1,8 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json",
+ "BootstrappedFile": "boostrappedFile.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json
new file mode 100644
index 000000000..8e9c8e7cd
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json
@@ -0,0 +1,156 @@
+{ "Hosts":
+ [
+ {
+ "Name": "R1",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R2",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R3",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R4",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R5",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R6",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R7",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R8",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN2",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN3",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN4",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN5",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN6",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN7",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN8",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN9",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN10",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N1",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N2",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N3",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N4",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N5",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N6",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N7",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N8",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N9",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N10",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N11",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N12",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N13",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N14",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N15",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N16",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N17",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N18",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N19",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N20",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1/node.json b/test/testdata/deployednettemplates/recipes/scenario1/node.json
index 0788913ab..2d08bcf07 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1/node.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1/node.json
@@ -5,7 +5,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
"AltConfigs": [
{
"APIToken": "{{APIToken}}",
@@ -14,7 +14,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
"FractionApply": 0.2
}
]
diff --git a/test/testdata/deployednettemplates/recipes/scenario1/relay.json b/test/testdata/deployednettemplates/recipes/scenario1/relay.json
index 25bb6b5a2..db8fb939d 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1/relay.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1/relay.json
@@ -7,5 +7,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario2/node.json b/test/testdata/deployednettemplates/recipes/scenario2/node.json
index 6b3849736..3641b0f6d 100644
--- a/test/testdata/deployednettemplates/recipes/scenario2/node.json
+++ b/test/testdata/deployednettemplates/recipes/scenario2/node.json
@@ -5,5 +5,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario2/relay.json b/test/testdata/deployednettemplates/recipes/scenario2/relay.json
index 25bb6b5a2..db8fb939d 100644
--- a/test/testdata/deployednettemplates/recipes/scenario2/relay.json
+++ b/test/testdata/deployednettemplates/recipes/scenario2/relay.json
@@ -7,5 +7,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3/node.json b/test/testdata/deployednettemplates/recipes/scenario3/node.json
index aec502ef5..b60d95e0c 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3/node.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3/node.json
@@ -5,7 +5,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
"AltConfigs": [
{
"APIToken": "{{APIToken}}",
@@ -14,7 +14,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
"FractionApply": 0.01
}
]
diff --git a/test/testdata/deployednettemplates/recipes/scenario3/relay.json b/test/testdata/deployednettemplates/recipes/scenario3/relay.json
index f568eb3de..f0d447a81 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3/relay.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3/relay.json
@@ -7,5 +7,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
diff --git a/test/testdata/nettemplates/ShortParticipationKeys.json b/test/testdata/nettemplates/ShortParticipationKeys.json
new file mode 100644
index 000000000..28d06324f
--- /dev/null
+++ b/test/testdata/nettemplates/ShortParticipationKeys.json
@@ -0,0 +1,55 @@
+{
+ "Genesis": {
+ "NetworkName": "shortpartkeys",
+ "ConsensusProtocol": "shortpartkeysprotocol",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 36,
+ "PartKeyDilution": 8,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 33,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 33,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 34,
+ "Online": true
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [
+ ]
+ },
+ {
+ "Name": "Node1",
+ "Wallets": [
+ { "Name": "Wallet1",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Node2",
+ "Wallets": [
+ { "Name": "Wallet2",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Node3",
+ "Wallets": [
+ { "Name": "Wallet3",
+ "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/SingleNodeNetwork.json b/test/testdata/nettemplates/SingleNodeNetwork.json
new file mode 100644
index 000000000..359b75067
--- /dev/null
+++ b/test/testdata/nettemplates/SingleNodeNetwork.json
@@ -0,0 +1,33 @@
+{
+ "Genesis": {
+ "NetworkName": "snn",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 50,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 25,
+ "Online": true
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": false,
+ "Wallets": [
+ { "Name": "Wallet1", "ParticipationOnly": false },
+ { "Name": "Wallet2", "ParticipationOnly": false },
+ { "Name": "Wallet3", "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/TwoNodes50EachV26.json b/test/testdata/nettemplates/TwoNodes50EachV26.json
new file mode 100644
index 000000000..848765da1
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodes50EachV26.json
@@ -0,0 +1,29 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 50,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 50,
+ "Online": true
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [{ "Name": "Wallet1", "ParticipationOnly": false }]
+ },
+ {
+ "Name": "Node",
+ "Wallets": [{ "Name": "Wallet2", "ParticipationOnly": false }]
+ }
+ ]
+}
diff --git a/tools/network/telemetryURIUpdateService.go b/tools/network/telemetryURIUpdateService.go
index ddc972e29..a6e0da307 100644
--- a/tools/network/telemetryURIUpdateService.go
+++ b/tools/network/telemetryURIUpdateService.go
@@ -60,7 +60,7 @@ func (t *telemetryURIUpdater) Start() {
updateTelemetryURI := func() {
endpointURL := t.lookupTelemetryURL()
- if endpointURL != nil && endpointURL.String() != t.log.GetTelemetryURI() {
+ if endpointURL != nil && endpointURL.String() != t.log.GetTelemetryURI() && false == t.cfg.DisableNetworking {
err := t.log.UpdateTelemetryURI(endpointURL.String())
if err != nil {
t.log.Warnf("Unable to update telemetry URI to '%s' : %v", endpointURL.String(), err)
diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go
index 21ec4454f..eeffc88ef 100644
--- a/util/bloom/bloom.go
+++ b/util/bloom/bloom.go
@@ -18,19 +18,24 @@ const maxHashes = uint32(32)
// Filter represents the state of the Bloom filter
type Filter struct {
- numHashes uint32
- data []byte
- prefix [4]byte
+ numHashes uint32
+ data []byte
+ prefix [4]byte
+ hashStagingBuffer []uint32
+ preimageStagingBuffer []byte
}
// New creates a new Bloom filter
func New(sizeBits int, numHashes uint32, prefix uint32) *Filter {
m := (sizeBits + 7) / 8
filter := Filter{
- numHashes: numHashes,
- data: make([]byte, m),
+ numHashes: numHashes,
+ data: make([]byte, m),
+ hashStagingBuffer: make([]uint32, numHashes+3),
}
binary.BigEndian.PutUint32(filter.prefix[:], prefix)
+ filter.preimageStagingBuffer = make([]byte, len(filter.prefix), len(filter.prefix)+32)
+ copy(filter.preimageStagingBuffer, filter.prefix[:])
return &filter
}
@@ -53,10 +58,18 @@ func Optimal(numElements int, falsePositiveRate float64) (sizeBits int, numHashe
return int(math.Ceil(m)), numHashes
}
+// makePreimage creates the preimage we use for a byte-array before hashing it.
+func (f *Filter) makePreimage(x []byte) (preimage []byte) {
+ preimage = f.preimageStagingBuffer
+ preimage = append(preimage, x...)
+ return
+}
+
// Set marks x as present in the filter
func (f *Filter) Set(x []byte) {
- withPrefix := append(f.prefix[:], x...)
- hs := hash(withPrefix, f.numHashes)
+ withPrefix := f.makePreimage(x)
+ hs := f.hash(withPrefix)
+ f.preimageStagingBuffer = withPrefix[:len(f.prefix)]
n := uint32(len(f.data) * 8)
for _, h := range hs {
f.set(h % n)
@@ -65,8 +78,9 @@ func (f *Filter) Set(x []byte) {
// Test checks whether x is present in the filter
func (f *Filter) Test(x []byte) bool {
- withPrefix := append(f.prefix[:], x...)
- hs := hash(withPrefix, f.numHashes)
+ withPrefix := f.makePreimage(x)
+ hs := f.hash(withPrefix)
+ f.preimageStagingBuffer = withPrefix[:len(f.prefix)]
n := uint32(len(f.data) * 8)
for _, h := range hs {
if !f.test(h % n) {
@@ -116,6 +130,9 @@ func UnmarshalBinary(data []byte) (*Filter, error) {
}
copy(f.prefix[:], data[4:8])
f.data = data[8:]
+ f.preimageStagingBuffer = make([]byte, len(f.prefix), len(f.prefix)+32)
+ f.hashStagingBuffer = make([]uint32, f.numHashes+3)
+ copy(f.preimageStagingBuffer, f.prefix[:])
return f, nil
}
@@ -140,10 +157,10 @@ func UnmarshalJSON(data []byte) (*Filter, error) {
// Previously, we used the hashing method described in this paper:
// http://www.eecs.harvard.edu/~michaelm/postscripts/rsa2008.pdf
// but this gave us bad false positive rates for small bloom filters.
-func hash(x []byte, nhash uint32) []uint32 {
- res := make([]uint32, nhash+3)
+func (f *Filter) hash(x []byte) []uint32 {
+ res := f.hashStagingBuffer
- for i := uint32(0); i < (nhash+3)/4; i++ {
+ for i := uint32(0); i < (f.numHashes+3)/4; i++ {
h1, h2 := siphash.Hash128(uint64(i), 666666, x)
res[i*4] = uint32(h1)
@@ -152,7 +169,7 @@ func hash(x []byte, nhash uint32) []uint32 {
res[i*4+3] = uint32(h2 >> 32)
}
- return res[:nhash]
+ return res[:f.numHashes]
}
func (f *Filter) test(bit uint32) bool {
diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go
index f755fb62a..abf4ff5c4 100644
--- a/util/bloom/bloom_test.go
+++ b/util/bloom/bloom_test.go
@@ -14,6 +14,8 @@ import (
"testing"
"github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
)
func TestBitset(t *testing.T) {
@@ -247,3 +249,121 @@ func TestBinaryMarshalLength(t *testing.T) {
}
}
}
+
+func TestBloomFilterMemoryConsumption(t *testing.T) {
+ t.Run("Set", func(t *testing.T) {
+ N := 1000000
+ sizeBits, numHashes := Optimal(N, 0.01)
+ prefix := uint32(0)
+ bf := New(sizeBits, numHashes, prefix)
+
+ dataset := make([][]byte, N)
+ for n := 0; n < N; n++ {
+ hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
+ dataset[n] = hash[:]
+ }
+
+ result := testing.Benchmark(func(b *testing.B) {
+ // start this test with 10K iterations.
+ if b.N < 10000 {
+ b.N = 10000
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ bf.Set(dataset[n%N])
+ }
+ })
+
+ // make sure the memory allocated is less than 1 byte / iteration.
+ require.LessOrEqual(t, uint64(result.MemBytes), uint64(result.N))
+ })
+ t.Run("Test", func(t *testing.T) {
+ N := 1000000
+ sizeBits, numHashes := Optimal(N, 0.01)
+ prefix := uint32(0)
+ bf := New(sizeBits, numHashes, prefix)
+
+ dataset := make([][]byte, N)
+ for n := 0; n < N; n++ {
+ hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
+ dataset[n] = hash[:]
+ }
+
+ // set half of them.
+ for n := 0; n < N/2; n++ {
+ bf.Set(dataset[n])
+ }
+ result := testing.Benchmark(func(b *testing.B) {
+			// start this test with 1M iterations.
+ if b.N < 1000000 {
+ b.N = 1000000
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ bf.Test(dataset[n%N])
+ }
+ })
+
+ // make sure the memory allocated is less than 1 byte / iteration.
+ require.LessOrEqual(t, uint64(result.MemBytes), uint64(result.N))
+ })
+}
+
+func BenchmarkBloomFilterSet(b *testing.B) {
+ bfElements := 1000000
+ sizeBits, numHashes := Optimal(bfElements, 0.01)
+ prefix := uint32(0)
+ bf := New(sizeBits, numHashes, prefix)
+ dataset := make([][]byte, bfElements)
+ for n := 0; n < bfElements; n++ {
+ hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
+ dataset[n] = hash[:]
+ }
+
+ b.ResetTimer()
+ for x := 0; x < b.N; x++ {
+ bf.Set(dataset[x%bfElements])
+ }
+}
+
+func BenchmarkBloomFilterTest(b *testing.B) {
+ bfElements := 1000000
+ sizeBits, numHashes := Optimal(bfElements, 0.01)
+ prefix := uint32(0)
+ bf := New(sizeBits, numHashes, prefix)
+ dataset := make([][]byte, bfElements)
+ for n := 0; n < bfElements; n++ {
+ hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
+ dataset[n] = hash[:]
+ }
+ // set half of them.
+ for n := 0; n < bfElements/2; n++ {
+ bf.Set(dataset[n])
+ }
+
+ b.ResetTimer()
+ for x := 0; x < b.N; x++ {
+ bf.Test(dataset[x%bfElements])
+ }
+}
+
+// TestBloomFilterReferenceHash ensures that we generate a bloom filter in a consistent way. This is important since we want to ensure that
+// this code is backward compatible.
+func TestBloomFilterReferenceHash(t *testing.T) {
+ N := 3
+ sizeBits, numHashes := Optimal(N, 0.01)
+ prefix := uint32(0x11223344)
+ bf := New(sizeBits, numHashes, prefix)
+
+ for n := 0; n < N; n++ {
+ hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
+ bf.Set(hash[:])
+ }
+ bytes, err := bf.MarshalBinary()
+ require.NoError(t, err)
+ require.Equal(t, []byte{0x0, 0x0, 0x0, 0x7, 0x11, 0x22, 0x33, 0x44, 0x62, 0xf0, 0xe, 0x2c, 0x8c}, bytes)
+}
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index cb7a92d7f..76545cced 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -438,6 +438,12 @@ func dbretry(obj error) bool {
return ok && (err.Code == sqlite3.ErrLocked || err.Code == sqlite3.ErrBusy)
}
+// IsErrBusy examines the input inerr variable of type error and determines if it's a sqlite3 error for the ErrBusy error code.
+func IsErrBusy(inerr error) bool {
+ err, ok := inerr.(sqlite3.Error)
+ return ok && (err.Code == sqlite3.ErrBusy)
+}
+
type idemFn func(ctx context.Context, tx *sql.Tx) error
const infoTxRetries = 5
diff --git a/util/db/perf_test.go b/util/db/perf_test.go
index 06deecf01..eb25bbcbf 100644
--- a/util/db/perf_test.go
+++ b/util/db/perf_test.go
@@ -65,6 +65,7 @@ func BenchmarkSQLErasableWrites(b *testing.B) {
wdb, err := MakeErasableAccessor(fn)
require.NoError(b, err)
+ defer wdb.Close()
logging.Base().SetLevel(logging.Error)
diff --git a/util/metrics/counter_test.go b/util/metrics/counter_test.go
index 473ac1048..a6c5b3ead 100644
--- a/util/metrics/counter_test.go
+++ b/util/metrics/counter_test.go
@@ -53,7 +53,7 @@ func TestMetricCounter(t *testing.T) {
// wait half-a cycle
time.Sleep(test.sampleRate / 2)
}
- // wait two reporting cycles to ensure we recieved all the messages.
+ // wait two reporting cycles to ensure we received all the messages.
time.Sleep(test.sampleRate * 2)
metricService.Shutdown()
@@ -98,7 +98,7 @@ func TestMetricCounterFastInts(t *testing.T) {
time.Sleep(test.sampleRate / 2)
}
counter.AddUint64(2, nil)
- // wait two reporting cycles to ensure we recieved all the messages.
+ // wait two reporting cycles to ensure we received all the messages.
time.Sleep(test.sampleRate * 2)
metricService.Shutdown()
@@ -145,7 +145,7 @@ func TestMetricCounterMixed(t *testing.T) {
time.Sleep(test.sampleRate / 2)
}
counter.AddUint64(2, nil)
- // wait two reporting cycles to ensure we recieved all the messages.
+ // wait two reporting cycles to ensure we received all the messages.
time.Sleep(test.sampleRate * 2)
metricService.Shutdown()
diff --git a/util/metrics/gauge_test.go b/util/metrics/gauge_test.go
index 47de020bd..e2262ce7b 100644
--- a/util/metrics/gauge_test.go
+++ b/util/metrics/gauge_test.go
@@ -54,7 +54,7 @@ func TestMetricGauge(t *testing.T) {
time.Sleep(test.sampleRate / 2)
}
- // wait two reporting cycles to ensure we recieved all the messages.
+ // wait two reporting cycles to ensure we received all the messages.
time.Sleep(test.sampleRate * 2)
metricService.Shutdown()
diff --git a/util/metrics/segment_test.go b/util/metrics/segment_test.go
index 0d1226904..3bd07eaa9 100644
--- a/util/metrics/segment_test.go
+++ b/util/metrics/segment_test.go
@@ -57,7 +57,7 @@ func TestMetricSegment(t *testing.T) {
}
segmentTest()
segmentTest()
- // wait two reporting cycles to ensure we recieved all the messages.
+ // wait two reporting cycles to ensure we received all the messages.
time.Sleep(test.sampleRate * 2)
metricService.Shutdown()
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
new file mode 100644
index 000000000..3b97454f3
--- /dev/null
+++ b/util/metrics/tagcounter.go
@@ -0,0 +1,151 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package metrics
+
+import (
+ "strconv"
+ "strings"
+ "sync/atomic"
+
+ "github.com/algorand/go-deadlock"
+)
+
+// NewTagCounter makes a set of metrics under rootName for tagged counting.
+// "{TAG}" in rootName is replaced by the tag, otherwise "_{TAG}" is appended.
+// The new TagCounter is registered with the DefaultRegistry as a side effect.
+func NewTagCounter(rootName, desc string) *TagCounter {
+	tc := &TagCounter{Name: rootName, Description: desc}
+	DefaultRegistry().Register(tc)
+	return tc
+}
+
+// TagCounter holds a set of counters
+type TagCounter struct {
+	// Name is the metric name; it may contain the "{TAG}" template slot.
+	Name string
+	// Description is the help text emitted alongside the metric.
+	Description string
+
+	// a read only race-free reference to tags
+	tagptr atomic.Value
+
+	// authoritative tag map; replaced (never mutated in place) under tagLock
+	tags map[string]*uint64
+
+	// backing storage for the counter values, grown in fixed-size blocks
+	storage [][]uint64
+	// next free slot in the last storage block
+	storagePos int
+
+	// guards tags/storage/storagePos while inserting a new tag
+	tagLock deadlock.Mutex
+}
+
+// Add t[tag] += val, fast and multithread safe
+// Fast path: look up the counter pointer in the read-only map snapshot and
+// bump it atomically. Slow path (first time a tag is seen): take tagLock,
+// publish a NEW map containing the tag, then loop so the fast path does the add.
+func (tc *TagCounter) Add(tag string, val uint64) {
+	for {
+		var tags map[string]*uint64
+		tagptr := tc.tagptr.Load()
+		if tagptr != nil {
+			tags = tagptr.(map[string]*uint64)
+		}
+
+		// Lock-free fast path: the tag already has a counter slot.
+		count, ok := tags[tag]
+		if ok {
+			atomic.AddUint64(count, val)
+			return
+		}
+		tc.tagLock.Lock()
+		if _, ok = tc.tags[tag]; !ok {
+			// Still need to add a new tag.
+			// Make a new map so there's never any race.
+			newtags := make(map[string]*uint64, len(tc.tags)+1)
+			for k, v := range tc.tags {
+				newtags[k] = v
+			}
+			var st []uint64
+			if len(tc.storage) > 0 {
+				st = tc.storage[len(tc.storage)-1]
+				//fmt.Printf("new tag %v, old block\n", tag)
+			}
+			// Current storage block full (or absent): start a fresh 16-slot block.
+			if tc.storagePos > (len(st) - 1) {
+				//fmt.Printf("new tag %v, new block\n", tag)
+				st = make([]uint64, 16)
+				tc.storagePos = 0
+				tc.storage = append(tc.storage, st)
+			}
+			newtags[tag] = &(st[tc.storagePos])
+			//fmt.Printf("tag %v = %p\n", tag, newtags[tag])
+			tc.storagePos++
+			tc.tags = newtags
+			tc.tagptr.Store(newtags)
+		}
+		tc.tagLock.Unlock()
+		// Retry: the tag now exists, so the next iteration takes the fast path.
+	}
+}
+
+// WriteMetric is part of the Metric interface
+// It appends a "# Name Description" header followed by one "name value"
+// line per tag. When Name contains "{TAG}" the tag is substituted into the
+// name; otherwise "_tag" is appended to Name.
+func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) {
+	tagptr := tc.tagptr.Load()
+	if tagptr == nil {
+		// no values, nothing to say.
+		return
+	}
+	// TODO: what to do with "parentLabels"? obsolete part of interface?
+	buf.WriteString("# ")
+	buf.WriteString(tc.Name)
+	buf.WriteString(" ")
+	buf.WriteString(tc.Description)
+	buf.WriteString("\n")
+	isTemplate := strings.Contains(tc.Name, "{TAG}")
+	tags := tagptr.(map[string]*uint64)
+	for tag, tagcount := range tags {
+		if tagcount == nil {
+			continue
+		}
+		if isTemplate {
+			name := strings.ReplaceAll(tc.Name, "{TAG}", tag)
+			buf.WriteString(name)
+			buf.WriteRune(' ')
+			buf.WriteString(strconv.FormatUint(*tagcount, 10))
+			buf.WriteRune('\n')
+		} else {
+			buf.WriteString(tc.Name)
+			buf.WriteRune('_')
+			buf.WriteString(tag)
+			buf.WriteRune(' ')
+			buf.WriteString(strconv.FormatUint(*tagcount, 10))
+			buf.WriteRune('\n')
+		}
+	}
+}
+
+// AddMetric is part of the Metric interface
+// Copy the values in this TagCounter out into the string-string map.
+// Keys follow the same naming scheme as WriteMetric: "{TAG}" substitution
+// when Name is a template, otherwise Name + "_" + tag.
+func (tc *TagCounter) AddMetric(values map[string]string) {
+	tagp := tc.tagptr.Load()
+	if tagp == nil {
+		// no tags have ever been added; nothing to report
+		return
+	}
+	isTemplate := strings.Contains(tc.Name, "{TAG}")
+	tags := tagp.(map[string]*uint64)
+	for tag, tagcount := range tags {
+		if tagcount == nil {
+			continue
+		}
+		var name string
+		if isTemplate {
+			name = strings.ReplaceAll(tc.Name, "{TAG}", tag)
+		} else {
+			name = tc.Name + "_" + tag
+		}
+		values[name] = strconv.FormatUint(*tagcount, 10)
+	}
+}
diff --git a/util/metrics/tagcounter_test.go b/util/metrics/tagcounter_test.go
new file mode 100644
index 000000000..52d0cb349
--- /dev/null
+++ b/util/metrics/tagcounter_test.go
@@ -0,0 +1,146 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package metrics
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestTagCounter hammers a TagCounter with one goroutine per tag and then
+// verifies every tag's final counter equals the number of Adds issued for it.
+func TestTagCounter(t *testing.T) {
+	tags := make([]string, 17)
+	for i := range tags {
+		tags[i] = fmt.Sprintf("A%c", 'A'+i)
+	}
+	//t.Logf("tags %v", tags)
+	countsIn := make([]uint64, len(tags))
+	for i := range countsIn {
+		countsIn[i] = uint64(10 * (i + 1))
+	}
+
+	tc := NewTagCounter("tc", "wat")
+
+	// check that empty TagCounter cleanly returns no results
+	var sb strings.Builder
+	tc.WriteMetric(&sb, "")
+	require.Equal(t, "", sb.String())
+
+	result := make(map[string]string)
+	tc.AddMetric(result)
+	require.Equal(t, 0, len(result))
+
+	var wg sync.WaitGroup
+	wg.Add(len(tags))
+
+	// each worker increments its tag countsIn[i] times, one unit at a time
+	runf := func(tag string, count uint64) {
+		for i := 0; i < int(count); i++ {
+			tc.Add(tag, 1)
+		}
+		wg.Done()
+	}
+
+	for i, tag := range tags {
+		go runf(tag, countsIn[i])
+	}
+	wg.Wait()
+
+	// peek at the internal map snapshot to verify the final counts
+	endtags := tc.tagptr.Load().(map[string]*uint64)
+	for i, tag := range tags {
+		countin := countsIn[i]
+		endcountp := endtags[tag]
+		if endcountp == nil {
+			t.Errorf("tag[%d] %s nil counter", i, tag)
+			continue
+		}
+		endcount := *endcountp
+		if endcount != countin {
+			t.Errorf("tag[%d] %v wanted %d got %d", i, tag, countin, endcount)
+		}
+	}
+}
+
+// BenchmarkTagCounter spreads exactly b.N Adds across 17 tags in a
+// triangle distribution (tag i gets ~(i+1) shares) and, after timing,
+// checks that the per-tag totals match what was issued.
+func BenchmarkTagCounter(b *testing.B) {
+	b.Logf("b.N = %d", b.N)
+	t := b
+	tags := make([]string, 17)
+	for i := range tags {
+		tags[i] = fmt.Sprintf("A%c", 'A'+i)
+	}
+	//t.Logf("tags %v", tags)
+	triangle := make([]int, len(tags))
+	tsum := 0
+	for i := range triangle {
+		triangle[i] = i + 1
+		tsum += i + 1
+	}
+	// b.N rarely divides evenly by tsum; hand out the remainder in
+	// rchunk-sized pieces so the counts still sum to exactly b.N.
+	wholeN := b.N / tsum
+	remainder := b.N - (tsum * wholeN)
+	rchunk := (remainder / len(tags)) + 1
+	countsIn := make([]uint64, len(tags))
+	csum := uint64(0)
+	for i := range countsIn {
+		rcc := rchunk
+		if remainder < rcc {
+			rcc = remainder
+			remainder = 0
+		} else {
+			remainder -= rchunk
+		}
+		countsIn[i] = uint64((triangle[i] * wholeN) + rcc)
+		csum += countsIn[i]
+	}
+	// sanity: the distribution above must account for every iteration
+	if csum != uint64(b.N) {
+		b.Errorf("b.N = %d, but total = %d", b.N, csum)
+	}
+
+	tc := NewTagCounter("tc", "wat")
+	//var wg sync.WaitGroup
+	//wg.Add(len(tags))
+
+	runf := func(tag string, count uint64) {
+		for i := 0; i < int(count); i++ {
+			tc.Add(tag, 1)
+		}
+		//wg.Done()
+	}
+
+	for i, tag := range tags {
+		// don't run in threads so that we can benchmark time
+		runf(tag, countsIn[i])
+	}
+	//wg.Wait()
+
+	// verify totals via the internal map snapshot
+	endtags := tc.tagptr.Load().(map[string]*uint64)
+	for i, tag := range tags {
+		countin := countsIn[i]
+		endcount := uint64(0)
+		endcountp := endtags[tag]
+		if endcountp != nil {
+			endcount = *endcountp
+			//t.Errorf("tag[%d] %s nil counter", i, tag)
+			//continue
+		}
+		//endcount := *endcountp
+		if endcount != countin {
+			t.Errorf("tag[%d] %v wanted %d got %d", i, tag, countin, endcount)
+		}
+	}
+}
diff --git a/util/process_common.go b/util/process_common.go
index ffefbdb34..bb706b308 100644
--- a/util/process_common.go
+++ b/util/process_common.go
@@ -19,9 +19,15 @@
package util
import (
+ "os"
"syscall"
)
+// FindProcess looks for a running process by its pid
+// NOTE(review): on Unix os.FindProcess always succeeds regardless of whether
+// the pid is actually alive; callers that care must probe with a signal.
+func FindProcess(pid int) (*os.Process, error) {
+	return os.FindProcess(pid)
+}
+
// KillProcess kills a running OS process
func KillProcess(pid int, sig syscall.Signal) error {
return syscall.Kill(pid, sig)
diff --git a/util/process_windows.go b/util/process_windows.go
index c86abdc28..7f21e08c6 100644
--- a/util/process_windows.go
+++ b/util/process_windows.go
@@ -19,49 +19,161 @@
package util
import (
+ "errors"
"os"
+ "syscall"
"unsafe"
"golang.org/x/sys/windows"
)
-// KillProcess kills a running OS process
-func KillProcess(pid int, _ os.Signal) error {
+const (
+	// ERROR_INVALID_PARAMETER is what OpenProcess yields for a nonexistent
+	// pid; see isInvalidParameterError below.
+	ERROR_INVALID_PARAMETER = syscall.Errno(87)
+
+	// STATUS_CANCELLED is the exit code passed to TerminateProcess.
+	STATUS_CANCELLED = uint32(0xC0000120)
+
+	// processTerminateWaitInMs bounds the wait on a terminated process handle.
+	processTerminateWaitInMs = 1000
+
+	// killChildsPassCount is how many sweeps killProcessTree makes over children.
+	killChildsPassCount = 4
+)
+
+var (
+	errFinishedProcess = errors.New("os: process already finished")
+)
- p, err := os.FindProcess(pid)
+// FindProcess looks for a running process by its pid
+// A (nil, nil) result means the process does not exist or has already
+// terminated; a non-nil *os.Process means it was found and is still live.
+func FindProcess(pid int) (*os.Process, error) {
+	var h syscall.Handle
+
+	process, err := os.FindProcess(pid)
+	if err != nil {
+		if isInvalidParameterError(err) { // NOTE: See function definition for details
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	// If we have a process, check if it is terminated
+	h, err = syscall.OpenProcess(syscall.SYNCHRONIZE, false, uint32(pid))
	if err == nil {
+		defer func() {
+			_ = syscall.CloseHandle(h)
+		}()
-		for _, v := range getChildrenProcesses(pid) {
-			_ = v.Kill()
+		// Zero-timeout wait: a signaled handle means the process already exited.
+		ret, e2 := syscall.WaitForSingleObject(h, 0)
+		if e2 == nil && ret == syscall.WAIT_OBJECT_0 {
+			return nil, nil
		}
+	} else {
+		if isInvalidParameterError(err) { // NOTE: See function definition for details
+			return nil, nil
+		}
+	}
-	err = p.Kill()
+	return process, nil
+}
+
+// KillProcess kills a running OS process
+// Any signal other than Signal(0) terminates the whole process tree.
+func KillProcess(pid int, signal os.Signal) error {
+	// Signal(0) only checks if we have access to kill a process and if it is really dead
+	if signal == syscall.Signal(0) {
+		return isProcessAlive(pid)
	}
+
+	return killProcessTree(pid)
+}
+
+// isProcessAlive returns nil when pid is alive and openable with terminate
+// rights, errFinishedProcess when it does not exist or has already exited,
+// and any other OpenProcess/Wait error as-is.
+func isProcessAlive(pid int) error {
+	var ret uint32
+
+	h, err := syscall.OpenProcess(syscall.SYNCHRONIZE|syscall.PROCESS_TERMINATE, false, uint32(pid))
+	if err != nil {
+		if isInvalidParameterError(err) { // NOTE: See function definition for details
+			return errFinishedProcess
+		}
+		return err
+	}
+	// Zero-timeout wait: a signaled handle means the process already exited.
+	ret, err = syscall.WaitForSingleObject(h, 0)
+	if err == nil && ret == syscall.WAIT_OBJECT_0 {
+		err = errFinishedProcess
+	}
+
+	_ = syscall.CloseHandle(h)
	return err
}
-func getChildrenProcesses(parentPid int) []*os.Process {
- out := []*os.Process{}
+// killProcessTree terminates pid and then repeatedly sweeps its child
+// processes, recursing into each; multiple passes catch children spawned
+// while the kill is in progress. Only the root kill can fail the call.
+func killProcessTree(pid int) error {
+	err := killProcess(pid)
+	if err != nil {
+		return err
+	}
+
+	// We do several passes just in case the process being killed spawns a new one
+	for pass := 1; pass <= killChildsPassCount; pass++ {
+		childProcessList := getChildProcesses(pid)
+		if len(childProcessList) == 0 {
+			break
+		}
+		for _, childPid := range childProcessList {
+			// best effort: errors killing a child are deliberately ignored
+			killProcessTree(childPid)
+		}
+	}
+
+	return nil
+}
+
+// getChildProcesses returns the pids of the direct children of pid by
+// walking a Toolhelp32 snapshot of all processes. On snapshot failure it
+// returns an empty (non-nil) slice.
+func getChildProcesses(pid int) []int {
+	var pe32 windows.ProcessEntry32
+
+	out := make([]int, 0)
+
	snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0))
-	if err == nil {
-		var pe32 windows.ProcessEntry32
-
-		defer windows.CloseHandle(snap)
-
-		pe32.Size = uint32(unsafe.Sizeof(pe32))
-		if err := windows.Process32First(snap, &pe32); err == nil {
-			for {
-				if pe32.ParentProcessID == uint32(parentPid) {
-					p, err := os.FindProcess(int(pe32.ProcessID))
-					if err == nil {
-						out = append(out, p)
-					}
-				}
-				if err = windows.Process32Next(snap, &pe32); err != nil {
-					break
-				}
-			}
+	if err != nil {
+		return out
+	}
+
+	defer func() {
+		_ = windows.CloseHandle(snap)
+	}()
+
+	pe32.Size = uint32(unsafe.Sizeof(pe32))
+	err = windows.Process32First(snap, &pe32)
+	// BUGFIX: iterate while enumeration SUCCEEDS (err == nil). The previous
+	// "for err != nil" skipped the loop entirely on a successful
+	// Process32First, so no child was ever returned; Process32Next ends the
+	// walk by returning ERROR_NO_MORE_FILES.
+	for err == nil {
+		if pe32.ParentProcessID == uint32(pid) {
+			// Add to list
+			out = append(out, int(pe32.ProcessID))
		}
+
+		err = windows.Process32Next(snap, &pe32)
	}
+
	return out
}
+
+// killProcess opens pid with terminate rights, terminates it with exit
+// code STATUS_CANCELLED, and waits briefly (processTerminateWaitInMs) for
+// the process handle to become signaled before closing it.
+func killProcess(pid int) error {
+	// gofmt: no spaces around the flag OR, matching the identical call in
+	// isProcessAlive above.
+	h, err := syscall.OpenProcess(syscall.SYNCHRONIZE|syscall.PROCESS_TERMINATE, false, uint32(pid))
+	if err == nil {
+		err = syscall.TerminateProcess(h, STATUS_CANCELLED)
+		if err == nil {
+			// bounded wait so a slow teardown cannot block the caller forever
+			_, _ = syscall.WaitForSingleObject(h, processTerminateWaitInMs)
+		}
+
+		_ = syscall.CloseHandle(h)
+	}
+
+	return err
+}
+
+// isInvalidParameterError reports whether err wraps ERROR_INVALID_PARAMETER.
+// NOTE: Unlike Unix, Windows tries to open the target process in order to kill it.
+// ERROR_INVALID_PARAMETER is returned if the process does not exist.
+// To mimic other OS behavior, if the process does not exist, don't return an error
+func isInvalidParameterError(err error) bool {
+	var syscallError syscall.Errno
+
+	if errors.As(err, &syscallError) {
+		if syscallError == ERROR_INVALID_PARAMETER {
+			return true
+		}
+	}
+	return false
+}