author     John Lee <64482439+algojohnlee@users.noreply.github.com>  2022-10-04 10:44:29 -0400
committer  GitHub <noreply@github.com>                               2022-10-04 10:44:29 -0400
commit     9869cd964ac8fede3056a4c2564d2f6528804cf6 (patch)
tree       6a52faf584390914a31959310f745bfa031c8772
parent     921e8f6f5a509fba1cd6cf00e7eeecc3ca9219b8 (diff)
parent     2145c29f967f74e770ef2f414458c387edd7f9dc (diff)

Merge pull request #4618 from Algo-devops-service/relstable3.10.0 (tag: v3.10.0-stable)

go-algorand 3.10.0-stable Release PR
-rw-r--r--  .circleci/config.yml | 615
-rw-r--r--  .github/workflows/build.yml | 21
-rw-r--r--  .github/workflows/codegen_verification.yml | 22
-rw-r--r--  .github/workflows/reviewdog.yml | 12
-rw-r--r--  .golangci-warnings.yml | 8
-rw-r--r--  .golangci.yml | 45
-rw-r--r--  CONTRIBUTING.md | 2
-rw-r--r--  Makefile | 10
-rw-r--r--  agreement/actions.go | 2
-rw-r--r--  agreement/demux.go | 5
-rw-r--r--  agreement/events.go | 5
-rw-r--r--  agreement/fuzzer/tests_test.go | 3
-rw-r--r--  agreement/msgp_gen.go | 58
-rw-r--r--  agreement/proposal.go | 5
-rw-r--r--  agreement/proposalStore.go | 10
-rw-r--r--  buildnumber.dat | 2
-rw-r--r--  catchup/catchpointService.go | 87
-rw-r--r--  catchup/catchpointService_test.go | 91
-rw-r--r--  catchup/ledgerFetcher.go | 6
-rw-r--r--  catchup/ledgerFetcher_test.go | 4
-rw-r--r--  catchup/networkFetcher.go | 134
-rw-r--r--  catchup/networkFetcher_test.go | 190
-rw-r--r--  catchup/service.go | 18
-rw-r--r--  catchup/service_test.go | 16
-rw-r--r--  catchup/universalFetcher.go | 4
-rw-r--r--  cmd/algod/main.go | 32
-rw-r--r--  cmd/algod/main_test.go | 5
-rw-r--r--  cmd/algofix/main.go | 8
-rw-r--r--  cmd/algofix/typecheck.go | 7
-rw-r--r--  cmd/algoh/main.go | 5
-rw-r--r--  cmd/algokey/common.go | 18
-rw-r--r--  cmd/algokey/keyreg.go | 11
-rw-r--r--  cmd/algokey/multisig.go | 9
-rw-r--r--  cmd/algokey/sign.go | 7
-rw-r--r--  cmd/algons/dnsCmd.go | 3
-rw-r--r--  cmd/buildtools/genesis.go | 15
-rw-r--r--  cmd/catchpointdump/net.go | 10
-rw-r--r--  cmd/catchupsrv/download.go | 6
-rw-r--r--  cmd/catchupsrv/main.go | 3
-rw-r--r--  cmd/dbgen/main.go | 8
-rw-r--r--  cmd/dispenser/server.go | 8
-rw-r--r--  cmd/goal/account.go | 21
-rw-r--r--  cmd/goal/accountsList.go | 5
-rw-r--r--  cmd/goal/application.go | 87
-rw-r--r--  cmd/goal/application_test.go | 143
-rw-r--r--  cmd/goal/clerk.go | 12
-rw-r--r--  cmd/goal/commands.go | 13
-rw-r--r--  cmd/goal/multisig.go | 7
-rw-r--r--  cmd/goal/node.go | 6
-rw-r--r--  cmd/goal/tealsign.go | 8
-rw-r--r--  cmd/loadgenerator/main.go | 18
-rw-r--r--  cmd/netgoal/generate.go | 29
-rw-r--r--  cmd/nodecfg/apply.go | 3
-rw-r--r--  cmd/opdoc/opdoc.go | 2
-rw-r--r--  cmd/pingpong/runCmd.go | 138
-rw-r--r--  cmd/tealdbg/local.go | 4
-rw-r--r--  cmd/tealdbg/localLedger.go | 6
-rw-r--r--  cmd/tealdbg/main.go | 9
-rwxr-xr-x  cmd/updater/update.sh | 2
-rw-r--r--  components/mocks/mockCatchpointCatchupAccessor.go | 5
-rw-r--r--  config/config.go | 3
-rw-r--r--  config/config_test.go | 53
-rw-r--r--  config/consensus.go | 28
-rw-r--r--  config/defaultsGenerator/defaultsGenerator.go | 7
-rw-r--r--  config/localTemplate.go | 2
-rw-r--r--  config/migrate.go | 22
-rw-r--r--  config/version.go | 2
-rw-r--r--  crypto/batchverifier.go | 11
-rw-r--r--  crypto/batchverifier_test.go | 4
-rw-r--r--  crypto/merklesignature/const.go | 3
-rw-r--r--  crypto/multisig.go | 49
-rw-r--r--  crypto/multisig_test.go | 51
-rw-r--r--  crypto/stateproof/coinGenerator.go | 7
-rw-r--r--  daemon/algod/api/algod.oas2.json | 69
-rw-r--r--  daemon/algod/api/algod.oas3.yml | 102
-rw-r--r--  daemon/algod/api/algod2.oas2.json | 0
-rw-r--r--  daemon/algod/api/client/restClient.go | 7
-rw-r--r--  daemon/algod/api/server/v2/generated/private/routes.go | 307
-rw-r--r--  daemon/algod/api/server/v2/generated/private/types.go | 7
-rw-r--r--  daemon/algod/api/server/v2/generated/routes.go | 454
-rw-r--r--  daemon/algod/api/server/v2/generated/types.go | 7
-rw-r--r--  daemon/algod/api/server/v2/handlers.go | 33
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_test.go | 71
-rw-r--r--  daemon/algod/server.go | 19
-rw-r--r--  daemon/kmd/config/config.go | 4
-rw-r--r--  daemon/kmd/server/server.go | 5
-rw-r--r--  daemon/kmd/wallet/driver/sqlite.go | 3
-rw-r--r--  data/abi/abi_encode.go | 617
-rw-r--r--  data/abi/abi_encode_test.go | 1279
-rw-r--r--  data/abi/abi_json.go | 291
-rw-r--r--  data/abi/abi_json_test.go | 150
-rw-r--r--  data/abi/abi_type.go | 498
-rw-r--r--  data/abi/abi_type_test.go | 613
-rw-r--r--  data/account/participationRegistry.go | 7
-rw-r--r--  data/account/participationRegistry_test.go | 33
-rw-r--r--  data/account/registeryDbOps.go | 9
-rw-r--r--  data/accountManager.go | 4
-rw-r--r--  data/accountManager_test.go | 44
-rw-r--r--  data/basics/address.go | 17
-rw-r--r--  data/bookkeeping/genesis.go | 4
-rw-r--r--  data/bookkeeping/txn_merkle_test.go | 11
-rw-r--r--  data/ledger.go | 2
-rw-r--r--  data/pools/transactionPool.go | 4
-rw-r--r--  data/pools/transactionPool_test.go | 117
-rw-r--r--  data/transactions/logic/README.md | 9
-rw-r--r--  data/transactions/logic/TEAL_opcodes.md | 59
-rw-r--r--  data/transactions/logic/assembler.go | 799
-rw-r--r--  data/transactions/logic/assembler_test.go | 813
-rw-r--r--  data/transactions/logic/backwardCompat_test.go | 6
-rw-r--r--  data/transactions/logic/debugger.go | 6
-rw-r--r--  data/transactions/logic/debugger_test.go | 2
-rw-r--r--  data/transactions/logic/doc.go | 29
-rw-r--r--  data/transactions/logic/doc_test.go | 19
-rw-r--r--  data/transactions/logic/eval.go | 138
-rw-r--r--  data/transactions/logic/evalCrypto_test.go | 3
-rw-r--r--  data/transactions/logic/evalStateful_test.go | 23
-rw-r--r--  data/transactions/logic/eval_test.go | 326
-rw-r--r--  data/transactions/logic/fields_test.go | 2
-rw-r--r--  data/transactions/logic/frames.go | 128
-rw-r--r--  data/transactions/logic/frames_test.go | 496
-rw-r--r--  data/transactions/logic/langspec.json | 86
-rw-r--r--  data/transactions/logic/opcodes.go | 242
-rw-r--r--  data/transactions/logic/teal.tmLanguage.json | 2
-rw-r--r--  data/transactions/verify/txn.go | 101
-rw-r--r--  data/transactions/verify/txn_test.go | 114
-rw-r--r--  data/transactions/verify/verifiedTxnCache.go | 10
-rw-r--r--  data/transactions/verify/verifiedTxnCache_test.go | 8
-rw-r--r--  data/txHandler.go | 12
-rw-r--r--  data/txHandler_test.go | 161
-rw-r--r--  gen/generate.go | 3
-rw-r--r--  go.mod | 3
-rw-r--r--  go.sum | 2
-rw-r--r--  installer/genesis/alphanet/genesis.json | 313
-rw-r--r--  installer/rpm/algorand/algorand.spec | 3
-rw-r--r--  ledger/accountdb.go | 276
-rw-r--r--  ledger/acctonline.go | 2
-rw-r--r--  ledger/acctonline_test.go | 107
-rw-r--r--  ledger/acctupdates.go | 24
-rw-r--r--  ledger/acctupdates_test.go | 21
-rw-r--r--  ledger/archival_test.go | 2
-rw-r--r--  ledger/catchpointtracker.go | 4
-rw-r--r--  ledger/catchpointtracker_test.go | 3
-rw-r--r--  ledger/catchpointwriter.go | 73
-rw-r--r--  ledger/catchpointwriter_test.go | 321
-rw-r--r--  ledger/catchupaccessor.go | 107
-rw-r--r--  ledger/catchupaccessor_test.go | 57
-rw-r--r--  ledger/evalbench_test.go | 2
-rw-r--r--  ledger/fullblock_perf_test.go | 638
-rw-r--r--  ledger/internal/apptxn_test.go | 59
-rw-r--r--  ledger/internal/eval.go | 2
-rw-r--r--  ledger/internal/eval_blackbox_test.go | 26
-rw-r--r--  ledger/ledger_test.go | 7
-rw-r--r--  ledger/msgp_gen.go | 32
-rw-r--r--  ledger/testing/randomAccounts.go | 2
-rw-r--r--  ledger/voters_test.go | 86
-rw-r--r--  libgoal/libgoal.go | 28
-rw-r--r--  libgoal/lockedFile.go | 4
-rw-r--r--  libgoal/transactions.go | 14
-rw-r--r--  logging/cyclicWriter_test.go | 5
-rw-r--r--  logging/telemetryspec/event.go | 10
-rw-r--r--  netdeploy/network.go | 3
-rw-r--r--  netdeploy/networkTemplate.go | 6
-rw-r--r--  netdeploy/remote/deployedNetwork.go | 10
-rw-r--r--  netdeploy/remote/nodecfg/nodeDir.go | 9
-rw-r--r--  network/limitlistener/rejectingLimitListener_test.go | 3
-rw-r--r--  network/wsNetwork.go | 19
-rw-r--r--  network/wsNetwork_test.go | 4
-rw-r--r--  network/wsPeer.go | 13
-rw-r--r--  network/wsPeer_test.go | 28
-rw-r--r--  node/node.go | 16
-rw-r--r--  node/node_test.go | 45
-rw-r--r--  nodecontrol/algodControl.go | 5
-rw-r--r--  protocol/codec.go | 29
-rw-r--r--  protocol/codec_test.go | 68
-rw-r--r--  protocol/codec_tester.go | 64
-rw-r--r--  protocol/consensus.go | 14
-rw-r--r--  protocol/transcode/core_test.go | 3
-rw-r--r--  rpcs/blockService_test.go | 4
-rw-r--r--  rpcs/txSyncer_test.go | 27
-rwxr-xr-x  scripts/build_deb.sh | 4
-rwxr-xr-x  scripts/build_package.sh | 2
-rwxr-xr-x  scripts/buildtools/check_tests.py | 10
-rwxr-xr-x  scripts/buildtools/install_buildtools.sh | 2
-rw-r--r--  scripts/buildtools/versions | 1
-rwxr-xr-x  scripts/check_deps.sh | 2
-rwxr-xr-x  scripts/compute_branch_channel.sh | 2
-rwxr-xr-x  scripts/compute_branch_network.sh | 5
-rwxr-xr-x  scripts/compute_package_name.sh | 10
-rwxr-xr-x  scripts/get_golang_version.sh | 2
-rwxr-xr-x  scripts/release/build/deb/build_deb.sh | 2
-rwxr-xr-x  scripts/release/mule/common/get_channel.sh | 5
-rwxr-xr-x  scripts/release/mule/deploy/docker/docker.sh | 8
-rwxr-xr-x  scripts/release/mule/package/deb/package.sh | 2
-rwxr-xr-x  scripts/release/mule/test/test.sh | 6
-rwxr-xr-x  scripts/travis/codegen_verification.sh | 37
-rw-r--r--  shared/pingpong/accounts.go | 871
-rw-r--r--  shared/pingpong/accounts_test.go | 60
-rw-r--r--  shared/pingpong/config.go | 82
-rw-r--r--  shared/pingpong/pingpong.go | 1209
-rw-r--r--  stateproof/worker_test.go | 4
-rw-r--r--  test/commandandcontrol/cc_agent/component/pingPongComponent.go | 8
-rw-r--r--  test/commandandcontrol/cc_client/main.go | 3
-rw-r--r--  test/e2e-go/cli/algod/expect/algod_expect_test.go | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp | 12
-rw-r--r--  test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go | 2
-rw-r--r--  test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 70
-rw-r--r--  test/e2e-go/cli/goal/expect/goalFormattingTest.exp | 2
-rw-r--r--  test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp | 159
-rw-r--r--  test/e2e-go/features/participation/participationRewards_test.go | 6
-rw-r--r--  test/e2e-go/features/stateproofs/stateproofs_test.go | 30
-rw-r--r--  test/e2e-go/features/transactions/asset_test.go | 21
-rw-r--r--  test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go | 3
-rw-r--r--  test/e2e-go/upgrades/rekey_support_test.go | 6
-rw-r--r--  test/framework/fixtures/baseFixture.go | 3
-rw-r--r--  test/framework/fixtures/expectFixture.go | 3
-rw-r--r--  test/framework/fixtures/kmdFixture.go | 7
-rw-r--r--  test/framework/fixtures/libgoalFixture.go | 26
-rw-r--r--  test/framework/fixtures/restClientFixture.go | 50
-rw-r--r--  test/heapwatch/README.md | 50
-rw-r--r--  test/heapwatch/block_history.py | 258
-rw-r--r--  test/heapwatch/block_history_plot.py | 162
-rw-r--r--  test/heapwatch/block_history_relays.py | 109
-rw-r--r--  test/heapwatch/client_ram_report.py | 60
-rw-r--r--  test/heapwatch/heapWatch.py | 221
-rw-r--r--  test/heapwatch/metrics_delta.py | 164
-rw-r--r--  test/heapwatch/nodeHostTarget.py | 17
-rwxr-xr-x  test/heapwatch/plot_crr_csv.py | 75
-rw-r--r--  test/heapwatch/runNodeHost.py | 17
-rw-r--r--  test/netperf-go/puppeteer/promMetricFetcher.go | 4
-rw-r--r--  test/netperf-go/puppeteer/puppeteer.go | 5
-rwxr-xr-x  test/scripts/e2e.sh | 2
-rwxr-xr-x  test/scripts/e2e_basic_start_stop.sh | 17
-rwxr-xr-x  test/scripts/e2e_client_runner.py | 4
-rwxr-xr-x  test/scripts/e2e_subs/goal-app-info.sh | 95
-rw-r--r--  test/scripts/tps.py | 67
-rw-r--r--  test/testdata/deployednettemplates/hosttemplates/hosttemplates.json | 168
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile | 15
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py | 31
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json | 154
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/net.json | 504
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/node.json | 10
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json | 5
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json | 7
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json | 11
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json | 88
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/Makefile | 2
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py | 15
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/genesis.json | 128
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/net.json | 602
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/topology.json | 126
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile | 15
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/gen_topology.py | 31
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/genesis.json | 2614
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/net.json | 8434
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/node.json | 10
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/nonPartNode.json | 5
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/recipe.json | 7
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/relay.json | 11
-rw-r--r--  test/testdata/deployednettemplates/recipes/betanet-model-2/topology.json | 304
-rw-r--r--  tools/debug/algodump/main.go | 2
-rw-r--r--  tools/debug/doberman/logo.go | 2
-rw-r--r--  tools/debug/doberman/main.go | 3
-rw-r--r--  tools/debug/dumpblocks/main.go | 122
-rw-r--r--  tools/debug/logfilter/main_test.go | 3
-rw-r--r--  tools/network/cloudflare/cloudflare.go | 16
-rw-r--r--  tools/network/cloudflare/createRecord.go | 4
-rw-r--r--  tools/network/cloudflare/deleteRecord.go | 4
-rw-r--r--  tools/network/cloudflare/listRecords.go | 4
-rw-r--r--  tools/network/cloudflare/zones.go | 4
-rw-r--r--  tools/teal/algotmpl/main.go | 5
-rw-r--r--  tools/teal/dkey/dsign/main.go | 12
-rw-r--r--  tools/teal/tealcut/main.go | 3
-rw-r--r--  util/codecs/json.go | 5
-rw-r--r--  util/db/dbutil_test.go | 9
-rw-r--r--  util/io.go | 5
-rw-r--r--  util/metrics/metrics.go | 2
-rw-r--r--  util/metrics/metrics_test.go | 4
-rw-r--r--  util/metrics/tagcounter.go | 9
-rw-r--r--  util/sleep_linux_32.go | 2
-rw-r--r--  util/sleep_linux_64.go | 2
-rw-r--r--  util/tokens/tokens.go | 4
281 files changed, 23496 insertions, 7373 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5cfc869fc..81b4805a2 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,3 +1,7 @@
+# Disclaimer:
+# * Unless otherwise specified, assume `resource_class` and `parallelism` values are cherry-picked values that provided a reasonable enough build-duration-to-cost tradeoff at the time of choosing.
+# * There's too many variables (architecture types, CircleCI concurrency limitations, parallel pipeline runs, source code changes) to feel confident we've found a best-fit configuration.
+
version: 2.1
orbs:
@@ -38,19 +42,19 @@ executors:
resource_class: arm.large
mac_amd64_medium:
macos:
- xcode: 13.4.1
+ xcode: 13.2.1
resource_class: medium
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_amd64_large:
macos:
- xcode: 13.4.1
+ xcode: 13.2.1
resource_class: large
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_arm64: &executor-mac-arm64
machine: true
- resource_class: algorand/macstadium-m1
+ resource_class: algorand/macstadium-m1-macos11
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
# these are required b/c jobs explicitly assign sizes to the executors
@@ -60,23 +64,11 @@ executors:
mac_arm64_large:
<<: *executor-mac-arm64
+# ===== Workflow Definitions =====
workflows:
version: 2
"circleci_build_and_test":
jobs:
- - codegen_verification
-
- - build:
- name: << matrix.platform >>_build
- matrix: &matrix-default
- parameters:
- platform: ["amd64", "arm64", "mac_amd64"]
- filters: &filters-default
- branches:
- ignore:
- - /rel\/.*/
- - << pipeline.parameters.valid_nightly_branch >>
-
- build_nightly:
name: << matrix.platform >>_build_nightly
matrix: &matrix-nightly
@@ -91,10 +83,14 @@ workflows:
- test:
name: << matrix.platform >>_test
- matrix:
- <<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ matrix: &matrix-default
+ parameters:
+ platform: ["amd64", "arm64"]
+ filters: &filters-default
+ branches:
+ ignore:
+ - /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
- test_nightly:
name: << matrix.platform >>_test_nightly
@@ -108,8 +104,8 @@ workflows:
name: << matrix.platform >>_integration
matrix:
<<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ filters:
+ <<: *filters-default
- integration_nightly:
name: << matrix.platform >>_integration_nightly
@@ -123,8 +119,8 @@ workflows:
name: << matrix.platform >>_e2e_expect
matrix:
<<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ filters:
+ <<: *filters-default
- e2e_expect_nightly:
name: << matrix.platform >>_e2e_expect_nightly
@@ -138,8 +134,8 @@ workflows:
name: << matrix.platform >>_e2e_subs
matrix:
<<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ filters:
+ <<: *filters-default
- e2e_subs_nightly:
name: << matrix.platform >>_e2e_subs_nightly
@@ -155,7 +151,7 @@ workflows:
name: << matrix.platform >>_<< matrix.job_type >>_verification
matrix:
parameters:
- platform: ["amd64", "arm64", "mac_amd64"]
+ platform: ["amd64", "arm64"]
job_type: ["test", "integration", "e2e_expect"]
requires:
- << matrix.platform >>_<< matrix.job_type >>
@@ -179,17 +175,245 @@ workflows:
- << matrix.platform >>_integration_nightly_verification
- << matrix.platform >>_e2e_expect_nightly_verification
- << matrix.platform >>_e2e_subs_nightly
- - codegen_verification
filters:
branches:
only:
- /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
context:
- slack-secrets
- aws-secrets
#- windows_x64_build
+# ===== Job Definitions =====
+jobs:
+ build_nightly:
+ description: "Persists build artifacts to workspace in order to support `upload_binaries`."
+ parameters:
+ platform:
+ type: string
+ build_dir:
+ type: string
+ default: << pipeline.parameters.build_dir >>
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
+ steps:
+ - generic_build
+ - persist_to_workspace:
+ root: << parameters.build_dir >>
+ paths:
+ - project
+ - go
+ - gimme
+ - .gimme
+ - slack/notify: &slack-fail-event
+ event: fail
+ template: basic_fail_1
+
+ test:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parallelism: 32
+ steps:
+ - generic_build
+ - generic_test:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_test
+ short_test_flag: "-short"
+ - upload_coverage
+
+ test_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parallelism: 4
+ steps:
+ - generic_build
+ - generic_test:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_test_nightly
+ no_output_timeout: 45m
+ - upload_coverage
+ - slack/notify: &slack-fail-event
+ event: fail
+ template: basic_fail_1
+
+ integration:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parallelism: 16
+ environment:
+ E2E_TEST_FILTER: "GO"
+ steps:
+ - generic_build
+ - generic_integration:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_integration
+ short_test_flag: "-short"
+
+ integration_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parallelism: 4
+ environment:
+ E2E_TEST_FILTER: "GO"
+ steps:
+ - generic_build
+ - generic_integration:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_integration_nightly
+ no_output_timeout: 45m
+ - slack/notify:
+ <<: *slack-fail-event
+
+ e2e_expect:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parallelism: 10
+ environment:
+ E2E_TEST_FILTER: "EXPECT"
+ steps:
+ - generic_build
+ - generic_integration:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_e2e_expect
+ short_test_flag: "-short"
+
+ e2e_expect_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parallelism: 2
+ environment:
+ E2E_TEST_FILTER: "EXPECT"
+ steps:
+ - generic_build
+ - generic_integration:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform>>_e2e_expect_nightly
+ no_output_timeout: 45m
+ - slack/notify:
+ <<: *slack-fail-event
+
+ e2e_subs:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
+ environment:
+ E2E_TEST_FILTER: "SCRIPTS"
+ steps:
+ - generic_build
+ - generic_integration:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_e2e_subs
+ short_test_flag: "-short"
+
+ e2e_subs_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
+ environment:
+ E2E_TEST_FILTER: "SCRIPTS"
+ CI_PLATFORM: << parameters.platform >>
+ # This platform is arbitrary, basically we just want to keep temps for
+ # one of the platforms in the matrix.
+ CI_KEEP_TEMP_PLATFORM: "amd64"
+ steps:
+ - generic_build
+ - generic_integration:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_e2e_subs_nightly
+ no_output_timeout: 45m
+ - slack/notify:
+ <<: *slack-fail-event
+
+ windows_x64_build:
+ executor:
+ name: win/default
+ size: large
+ steps:
+ - checkout
+ - prepare_windows
+ - run:
+ no_output_timeout: 45m
+ command: |
+ # export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
+ export GOPATH="/home/circleci/go"
+ export ALGORAND_DEADLOCK=enable
+ export SKIP_GO_INSTALLATION=True
+ export PATH=/mingw64/bin:/C/tools/msys64/mingw64/bin:/usr/bin:$PATH
+ export MAKE=mingw32-make
+ $msys2 scripts/travis/build_test.sh
+ shell: bash.exe
+
+ tests_verification_job:
+ docker:
+ - image: python:3.9.6-alpine
+ resource_class: small
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parameters:
+ platform:
+ type: string
+ job_type:
+ type: string
+ steps:
+ - checkout
+ - tests_verification_command:
+ result_subdir: << parameters.platform >>_<< parameters.job_type >>
+
+ tests_verification_job_nightly:
+ docker:
+ - image: python:3.9.6-alpine
+ resource_class: small
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parameters:
+ platform:
+ type: string
+ job_type:
+ type: string
+ steps:
+ - checkout
+ - tests_verification_command:
+ result_subdir: << parameters.platform >>_<< parameters.job_type >>
+ - slack/notify:
+ <<: *slack-fail-event
+
+ upload_binaries:
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ steps:
+ - prepare_build_dir
+ - prepare_go
+ - upload_binaries_command:
+ platform: << parameters.platform >>
+ - slack/notify:
+ <<: *slack-fail-event
+
+# ===== Command Definitions =====
commands:
prepare_go:
description: Clean out existing Go so we can use our preferred version
@@ -219,7 +443,7 @@ commands:
shell: bash.exe
command: |
choco install -y msys2 pacman make wget --force
- choco install -y golang --version=1.17.9 --force
+ choco install -y golang --version=1.17.13 --force
choco install -y python3 --version=3.7.3 --force
export msys2='cmd //C RefreshEnv.cmd '
export msys2+='& set MSYS=winsymlinks:nativestrict '
@@ -229,21 +453,22 @@ commands:
$msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain mingw-w64-x86_64-libtool unzip autoconf automake
generic_build:
- description: Run basic build and store in workspace for re-use by different architectures
+ description: >
+ Run basic build.
+
+ If command execution time increases _appreciably_, revisit CI topology:
+ * Historically, the command executes _quickly_ (< 3m with resource class >= medium).
+ * Consequently, it's faster to embed the command in a combined build + test workflow rather than independent build and test workflows.
parameters:
build_dir:
type: string
default: << pipeline.parameters.build_dir >>
steps:
+ - prepare_build_dir
+ - checkout
+ - prepare_go
- restore_libsodium
- - restore_cache:
- keys:
- - 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
- - restore_cache:
- keys:
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
- - 'go-cache-v3-{{ arch }}-'
+ - restore_go_caches
- run:
name: scripts/travis/build.sh --make_debug
command: |
@@ -255,21 +480,41 @@ commands:
export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions
scripts/travis/build.sh --make_debug
- cache_libsodium
+ - save_go_caches
+
+ save_go_caches:
+ description: Cache Go source and build caches
+ parameters:
+ build_dir:
+ type: string
+ default: << pipeline.parameters.build_dir >>
+ steps:
- save_cache:
- key: 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ name: Saving Go mod source cache
+ key: go-mod-v5-{{ .Branch }}-{{ checksum "go.sum" }}
paths:
- << parameters.build_dir >>/go/pkg/mod
- save_cache:
- key: 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
+ name: Saving Go build cache
+ key: go-cache-v5-{{ arch }}-{{ .Branch }}-{{ checksum "go.sum" }}
paths:
- tmp/go-cache
- - persist_to_workspace:
- root: << parameters.build_dir >>
- paths:
- - project
- - go
- - gimme
- - .gimme
+
+ restore_go_caches:
+ description: Restore Go source and build caches
+ steps:
+ - restore_cache:
+ name: Restoring Go mod source cache
+ keys:
+ - go-mod-v5-{{ .Branch }}-{{ checksum "go.sum" }}
+ - go-mod-v5-{{ .Branch }}-
+ - go-mod-v5-master-
+ - restore_cache:
+ name: Restoring Go build cache
+ keys:
+ - go-cache-v5-{{ arch }}-{{ .Branch }}-{{ checksum "go.sum" }}
+ - go-cache-v5-{{ arch }}-{{ .Branch }}-
+ - go-cache-v5-{{ arch }}-master-
cache_libsodium:
description: Cache libsodium for build
@@ -280,7 +525,8 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- save_cache:
- key: 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
+ name: Save cached libsodium build
+ key: 'libsodium-fork-v4-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
paths:
- crypto/libs
@@ -293,8 +539,9 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- restore_cache:
+ name: Restore cached libsodium build
keys:
- - 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
+ - 'libsodium-fork-v4-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
generic_test:
description: Run build tests from build workspace, for re-use by diferent architectures
@@ -316,17 +563,10 @@ commands:
type: string
default: << pipeline.parameters.result_path >>
steps:
- - attach_workspace:
- at: << parameters.build_dir >>
- run: |
mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json
- - restore_cache:
- keys:
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
- - 'go-cache-v3-{{ arch }}-'
- run:
name: Run build tests
no_output_timeout: << parameters.no_output_timeout >>
@@ -388,8 +628,6 @@ commands:
type: string
default: << pipeline.parameters.result_path >>
steps:
- - attach_workspace:
- at: << parameters.build_dir >>
- run: |
mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml
@@ -443,9 +681,21 @@ commands:
at: << parameters.result_path >>
- run:
name: Check if all tests were run
+ # Add to --ignored-tests when a test should _not_ be considered.
+ # * For example, E2E expect test runners (e.g. `TestAlgodWithExpect`)
+ # produce partitioned subtests.
+ # * The parent tests are deliberately _not_ partitioned. By ignoring
+ # these tests, `check_tests.py` won't provide conflicting advice to
+ # partition the parent tests.
command: |
cat << parameters.result_path >>/<< parameters.result_subdir >>/**/testresults.json > << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json
- python3 scripts/buildtools/check_tests.py << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json
+ python3 scripts/buildtools/check_tests.py \
+ --tests-results-filepath << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json \
+ --ignored-tests \
+ TestAlgodWithExpect \
+ TestAlgohWithExpect \
+ TestGoalWithExpect \
+ TestTealdbgWithExpect
- store_artifacts:
path: << parameters.result_path >>/<< parameters.result_subdir >>
destination: << parameters.result_subdir >>/combined-test-results
@@ -481,250 +731,3 @@ commands:
command: |
export TRAVIS_BRANCH=${CIRCLE_BRANCH}
scripts/travis/test_release.sh
-
-jobs:
- codegen_verification:
- executor: amd64_medium
- steps:
- - checkout
- - prepare_go
- - run: |
- export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
- export GOPATH="/home/circleci/go"
- scripts/travis/codegen_verification.sh
-
- build:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- steps:
- - prepare_build_dir
- - checkout
- - prepare_go
- - generic_build
-
- build_nightly:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- steps:
- - prepare_build_dir
- - checkout
- - prepare_go
- - generic_build
- - slack/notify: &slack-fail-event
- event: fail
- template: basic_fail_1
-
- test:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_test:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_test
- short_test_flag: "-short"
- - upload_coverage
-
- test_nightly:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_test:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_test_nightly
- no_output_timeout: 45m
- - upload_coverage
- - slack/notify:
- <<: *slack-fail-event
-
- integration:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_large
- working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 2
- environment:
- E2E_TEST_FILTER: "GO"
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_integration:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_integration
- short_test_flag: "-short"
-
- integration_nightly:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_large
- working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
- environment:
- E2E_TEST_FILTER: "GO"
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_integration:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_integration_nightly
- no_output_timeout: 45m
- - slack/notify:
- <<: *slack-fail-event
-
- e2e_expect:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 2
- environment:
- E2E_TEST_FILTER: "EXPECT"
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_integration:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_e2e_expect
- short_test_flag: "-short"
-
- e2e_expect_nightly:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 2
- environment:
- E2E_TEST_FILTER: "EXPECT"
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_integration:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform>>_e2e_expect_nightly
- no_output_timeout: 45m
- - slack/notify:
- <<: *slack-fail-event
-
- e2e_subs:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_large
- working_directory: << pipeline.parameters.build_dir >>/project
- environment:
- E2E_TEST_FILTER: "SCRIPTS"
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_integration:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_e2e_subs
- short_test_flag: "-short"
-
- e2e_subs_nightly:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_large
- working_directory: << pipeline.parameters.build_dir >>/project
- environment:
- E2E_TEST_FILTER: "SCRIPTS"
- CI_PLATFORM: << parameters.platform >>
- # This platform is arbitrary, basically we just want to keep temps for
- # one of the platforms in the matrix.
- CI_KEEP_TEMP_PLATFORM: "amd64"
- steps:
- - prepare_build_dir
- - prepare_go
- - generic_integration:
- platform: << parameters.platform >>
- result_subdir: << parameters.platform >>_e2e_subs_nightly
- no_output_timeout: 45m
- - slack/notify:
- <<: *slack-fail-event
-
- windows_x64_build:
- executor:
- name: win/default
- size: large
- steps:
- - checkout
- - prepare_windows
- - run:
- no_output_timeout: 45m
- command: |
- #export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
- export GOPATH="/home/circleci/go"
- export ALGORAND_DEADLOCK=enable
- export SKIP_GO_INSTALLATION=True
- export PATH=/mingw64/bin:/C/tools/msys64/mingw64/bin:/usr/bin:$PATH
- export MAKE=mingw32-make
- $msys2 scripts/travis/build_test.sh
- shell: bash.exe
-
- tests_verification_job:
- docker:
- - image: python:3.9.6-alpine
- resource_class: small
- working_directory: << pipeline.parameters.build_dir >>/project
- parameters:
- platform: # platform: ["amd64", "arm64", "mac_amd64"]
- type: string
- job_type: # job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
- type: string
- steps:
- - checkout
- - tests_verification_command:
- result_subdir: << parameters.platform >>_<< parameters.job_type >>
-
- tests_verification_job_nightly:
- docker:
- - image: python:3.9.6-alpine
- resource_class: small
- working_directory: << pipeline.parameters.build_dir >>/project
- parameters:
- platform: # platform: ["amd64", "arm64", "mac_amd64"]
- type: string
- job_type: # job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
- type: string
- steps:
- - checkout
- - tests_verification_command:
- result_subdir: << parameters.platform >>_<< parameters.job_type >>
- - slack/notify:
- <<: *slack-fail-event
-
- upload_binaries:
- working_directory: << pipeline.parameters.build_dir >>/project
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- steps:
- - prepare_build_dir
- - prepare_go
- - upload_binaries_command:
- platform: << parameters.platform >>
- - slack/notify:
- <<: *slack-fail-event
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 825056b1e..58b462723 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,8 +1,11 @@
-name: "Build workflow"
+name: "Build Windows"
on:
+ push:
+ branches:
+ - master
pull_request:
jobs:
- build-test-windows:
+ build-windows:
runs-on: windows-2022
defaults:
run:
@@ -14,14 +17,20 @@ jobs:
update: true
path-type: inherit
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install golang
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: '1.17.9'
- - name: Build Test
+ go-version: "1.17.13"
+ - name: Restore libsodium from cache
+ id: cache-libsodium
+ uses: actions/cache@v3
+ with:
+ path: crypto/libs
+ key: libsodium-fork-v2-${{ runner.os }}-${{ hashFiles('crypto/libsodium-fork/**') }}
+ - name: Build
run: |
export ALGORAND_DEADLOCK=enable
export SKIP_GO_INSTALLATION=True
diff --git a/.github/workflows/codegen_verification.yml b/.github/workflows/codegen_verification.yml
new file mode 100644
index 000000000..cdeed288b
--- /dev/null
+++ b/.github/workflows/codegen_verification.yml
@@ -0,0 +1,22 @@
+name: "codegen verification"
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+jobs:
+ codegen_verification:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ path: go-algorand
+ - name: Uninstall existing go installation
+ run: sudo apt-get -y -q purge golang-go
+ - name: Run codegen_verification.sh
+ run: |
+ export GOPATH="${GITHUB_WORKSPACE}/go"
+ cd go-algorand
+ scripts/travis/codegen_verification.sh
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 6cc82a6e5..736b8a6bc 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -1,5 +1,8 @@
name: "ReviewDog workflow"
on:
+ push:
+ branches:
+ - master
pull_request:
jobs:
# Blocking Errors Section
@@ -18,12 +21,13 @@ jobs:
- name: reviewdog-golangci-lint
uses: reviewdog/action-golangci-lint@v2
with:
- golangci_lint_version: "v1.41.1"
+ golangci_lint_version: "v1.47.3"
golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners"
- reporter: "github-pr-review"
+ reporter: "github-pr-check"
tool_name: "Lint Errors"
level: "error"
fail_on_error: true
+ filter_mode: "nofilter"
# Non-Blocking Warnings Section
reviewdog-warnings:
runs-on: ubuntu-latest
@@ -44,7 +48,7 @@ jobs:
- name: Install specific golang
uses: actions/setup-go@v2
with:
- go-version: '1.17.9'
+ go-version: '1.17.13'
- name: Create folders for golangci-lint
run: mkdir -p cicdtmp/golangci-lint
- name: Check if custom golangci-lint is already built
@@ -59,7 +63,7 @@ jobs:
run: |
cd cicdtmp/golangci-lint
git clone https://github.com/golangci/golangci-lint.git .
- git checkout tags/v1.41.1
+ git checkout tags/v1.47.3
CGO_ENABLED=true go build -trimpath -o golangci-lint-cgo ./cmd/golangci-lint
./golangci-lint-cgo --version
cd ../../
diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml
index c0d9e1e38..f0f2eee48 100644
--- a/.golangci-warnings.yml
+++ b/.golangci-warnings.yml
@@ -5,14 +5,12 @@ run:
linters:
disable-all: true
enable:
- - staticcheck
+ - deadcode
+ - partitiontest
- structcheck
- typecheck
- varcheck
- - deadcode
- - gosimple
- unused
- - partitiontest
linters-settings:
@@ -41,8 +39,6 @@ issues:
exclude:
# ignore govet false positive fixed in https://github.com/golang/go/issues/45043
- "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify"
- # ignore golint false positive fixed in https://github.com/golang/lint/pull/487
- - "exported method (.*).Unwrap` should have comment or be unexported"
# ignore issues about the way we use _struct fields to define encoding settings
- "`_struct` is unused"
diff --git a/.golangci.yml b/.golangci.yml
index 9cf49999f..271c682e5 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,23 +1,44 @@
run:
timeout: 5m
- tests: false
+ tests: true
linters:
+ # default: deadcode, errcheck, gosimple, govet, ineffassign, staticcheck, typecheck, unused, varcheck
disable-all: true
enable:
- errcheck
- gofmt
- - golint
+ - gosimple
- govet
- ineffassign
- misspell
+ - nolintlint
+ - revive
+ - staticcheck
+ - typecheck
severity:
default-severity: error
+linters-settings:
+ nolintlint:
+ # require naming a specific linter X using //nolint:X
+ require-specific: true
+ # require comments like "//nolint:errcheck // Explanation of why we are ignoring linter here..."
+ require-explanation: true
+ errcheck:
+ exclude-functions:
+ # data/transactions/logic/assembler.go uses ops.error, warn, to append log messages: OK to ignore for this case
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).errorf
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).error
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warn
+
issues:
- # use these new lint checks on code since #2574
- new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57
+ # Work our way back over time to be clean against all these
+ # checkers. If you'd like to contribute, raise the number after ~,
+ # run the linter and dig in.
+ new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57~25
# Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below)
exclude-use-default: false
@@ -41,14 +62,28 @@ issues:
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
# "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore"
- ineffective break statement. Did you mean to break out of the outer loop
+ # revive: irrelevant error about naming
+ - "var-naming: don't use leading k in Go names"
exclude-rules:
+ - path: _test\.go
+ linters:
+ - errcheck
+ - gofmt
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nolintlint
+ # - revive
+ - staticcheck
+ - typecheck
# Add all linters here -- Comment this block out for testing linters
- path: test/linttest/lintissues\.go
linters:
- errcheck
- gofmt
- - golint
+ - revive
- govet
- ineffassign
- misspell
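
The nolintlint settings introduced above make suppression comments self-documenting: the suppressed linter must be named (require-specific) and a reason must follow (require-explanation). A minimal sketch of a compliant suppression, using a hypothetical helper:

    package example

    import "os"

    // closeQuietly is a hypothetical helper; the comment on the Close call
    // names the suppressed linter and explains the suppression, as the
    // nolintlint settings above require.
    func closeQuietly(f *os.File) {
        f.Close() //nolint:errcheck // best-effort close; the error is not actionable here
    }
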
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1d691afed..517fb17fa 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -45,7 +45,7 @@ Again, if you have a patch for a critical security vulnerability, please use our
For Go code we use the [Golang guidelines defined here](https://golang.org/doc/effective_go.html).
* Code must adhere to the official Go formatting guidelines (i.e. uses gofmt).
-* We use **gofmt** and **golint**. Also make sure to run `make sanity` and `make generate` before opening a pull request.
+* We use **gofmt** and **golangci-lint**. Also make sure to run `make sanity` and `make generate` before opening a pull request.
* Code must be documented adhering to the official Go commentary guidelines.
For JavaScript code we use the [MDN formatting rules](https://developer.mozilla.org/en-US/docs/MDN/Contribute/Guidelines/Code_guidelines/JavaScript).
diff --git a/Makefile b/Makefile
index 0860d71f7..a7613fbf4 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,6 @@ else
export GOPATH := $(shell go env GOPATH)
GOPATH1 := $(firstword $(subst :, ,$(GOPATH)))
endif
-export GOPROXY := direct
SRCPATH := $(shell pwd)
ARCH := $(shell ./scripts/archtype.sh)
OS_TYPE := $(shell ./scripts/ostype.sh)
@@ -100,15 +99,12 @@ fix: build
$(GOPATH1)/bin/algofix */
lint: deps
- $(GOPATH1)/bin/golint ./...
-
-vet:
- go vet ./...
+ $(GOPATH1)/bin/golangci-lint run -c .golangci.yml
check_shell:
find . -type f -name "*.sh" -exec shellcheck {} +
-sanity: vet fix lint fmt
+sanity: fix lint fmt
cover:
go test $(GOTAGS) -coverprofile=cover.out $(UNIT_TEST_SOURCES)
@@ -331,7 +327,7 @@ dump: $(addprefix gen/,$(addsuffix /genesis.dump, $(NETWORKS)))
install: build
scripts/dev_install.sh -p $(GOPATH1)/bin
-.PHONY: default fmt vet lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_swagger
+.PHONY: default fmt lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_swagger
###### TARGETS FOR CICD PROCESS ######
include ./scripts/release/mule/Makefile.mule
diff --git a/agreement/actions.go b/agreement/actions.go
index 42cfbcebb..779d9d467 100644
--- a/agreement/actions.go
+++ b/agreement/actions.go
@@ -236,6 +236,7 @@ func (a ensureAction) do(ctx context.Context, s *Service) {
Hash: a.Certificate.Proposal.BlockDigest.String(),
Round: uint64(a.Certificate.Round),
ValidatedAt: a.Payload.validatedAt,
+ ReceivedAt: a.Payload.receivedAt,
PreValidated: true,
PropBufLen: uint64(len(s.demux.rawProposals)),
VoteBufLen: uint64(len(s.demux.rawVotes)),
@@ -250,6 +251,7 @@ func (a ensureAction) do(ctx context.Context, s *Service) {
Hash: a.Certificate.Proposal.BlockDigest.String(),
Round: uint64(a.Certificate.Round),
ValidatedAt: a.Payload.validatedAt,
+ ReceivedAt: a.Payload.receivedAt,
PreValidated: false,
PropBufLen: uint64(len(s.demux.rawProposals)),
VoteBufLen: uint64(len(s.demux.rawVotes)),
diff --git a/agreement/demux.go b/agreement/demux.go
index 70d7fc9ab..7379590d5 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -198,8 +198,11 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat
proto, err := d.ledger.ConsensusVersion(ParamsRound(e.ConsensusRound()))
e = e.AttachConsensusVersion(ConsensusVersionView{Err: makeSerErr(err), Version: proto})
- if e.t() == payloadVerified {
+ switch e.t() {
+ case payloadVerified:
e = e.(messageEvent).AttachValidatedAt(s.Clock.Since())
+ case payloadPresent:
+ e = e.(messageEvent).AttachReceivedAt(s.Clock.Since())
}
}()
diff --git a/agreement/events.go b/agreement/events.go
index a75a32774..61176872a 100644
--- a/agreement/events.go
+++ b/agreement/events.go
@@ -938,3 +938,8 @@ func (e messageEvent) AttachValidatedAt(d time.Duration) messageEvent {
e.Input.Proposal.validatedAt = d
return e
}
+
+func (e messageEvent) AttachReceivedAt(d time.Duration) messageEvent {
+ e.Input.Proposal.receivedAt = d
+ return e
+}
diff --git a/agreement/fuzzer/tests_test.go b/agreement/fuzzer/tests_test.go
index 75a0cc67c..2dfc83707 100644
--- a/agreement/fuzzer/tests_test.go
+++ b/agreement/fuzzer/tests_test.go
@@ -20,7 +20,6 @@ import (
"encoding/json"
"flag"
"fmt"
- "io/ioutil"
"log"
"math"
"math/rand"
@@ -440,7 +439,7 @@ func TestFuzzer(t *testing.T) {
t.Run(testName, func(t *testing.T) {
partitiontest.PartitionTest(t) // Check if this expect test should by run, may SKIP
jsonFilename := jsonFiles[testName]
- jsonBytes, err := ioutil.ReadFile(jsonFilename)
+ jsonBytes, err := os.ReadFile(jsonFilename)
require.NoError(t, err)
var fuzzerTest FuzzerTestFile
err = json.Unmarshal(jsonBytes, &fuzzerTest)
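
The io/ioutil swap above is the mechanical Go 1.16+ migration: os.ReadFile has the same signature and semantics that ioutil.ReadFile had. A minimal sketch (hypothetical function name):

    package example

    import "os"

    // readJSON reads a file exactly as ioutil.ReadFile used to;
    // since Go 1.16, ioutil.ReadFile simply calls os.ReadFile.
    func readJSON(path string) ([]byte, error) {
        return os.ReadFile(path)
    }
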
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index e5508cd47..4581b07a9 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -1357,7 +1357,7 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
zb0004Len := uint32(29)
- var zb0004Mask uint64 /* 37 bits */
+ var zb0004Mask uint64 /* 38 bits */
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
zb0004Len--
zb0004Mask |= 0x40
@@ -1420,59 +1420,59 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x200000
+ zb0004Mask |= 0x400000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x400000
+ zb0004Mask |= 0x800000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x800000
+ zb0004Mask |= 0x1000000
}
if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x1000000
+ zb0004Mask |= 0x2000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x2000000
+ zb0004Mask |= 0x4000000
}
if len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0 {
zb0004Len--
- zb0004Mask |= 0x4000000
+ zb0004Mask |= 0x8000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 {
zb0004Len--
- zb0004Mask |= 0x8000000
+ zb0004Mask |= 0x10000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 {
zb0004Len--
- zb0004Mask |= 0x10000000
+ zb0004Mask |= 0x20000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x20000000
+ zb0004Mask |= 0x40000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x40000000
+ zb0004Mask |= 0x80000000
}
if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x80000000
+ zb0004Mask |= 0x100000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x100000000
+ zb0004Mask |= 0x200000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
zb0004Len--
- zb0004Mask |= 0x200000000
+ zb0004Mask |= 0x400000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
zb0004Len--
- zb0004Mask |= 0x400000000
+ zb0004Mask |= 0x800000000
}
// variable map header, size zb0004Len
o = msgp.AppendMapHeader(o, zb0004Len)
@@ -1559,32 +1559,32 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0004Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0004Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0004Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "spt"
o = append(o, 0xa3, 0x73, 0x70, 0x74)
if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
@@ -1604,42 +1604,42 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0004Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter)
}
- if (zb0004Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp)
}
- if (zb0004Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o)
}
- if (zb0004Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "txn256"
o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36)
o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MarshalMsg(o)
}
- if (zb0004Mask & 0x80000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o)
}
- if (zb0004Mask & 0x100000000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0004Mask & 0x200000000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0004Mask & 0x400000000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove)
diff --git a/agreement/proposal.go b/agreement/proposal.go
index 232fa6f5f..ca0d6e678 100644
--- a/agreement/proposal.go
+++ b/agreement/proposal.go
@@ -94,6 +94,11 @@ type proposal struct {
// validated (and thus was ready to be delivered to the state
// machine), relative to the zero of that round.
validatedAt time.Duration
+
+ // receivedAt indicates the time at which this proposal was
+ // delivered to the agreement package (as a messageEvent),
+ // relative to the zero of that round.
+ receivedAt time.Duration
}
func makeProposal(ve ValidatedBlock, pf crypto.VrfProof, origPer period, origProp basics.Address) proposal {
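
Since the new receivedAt (set at payloadPresent) and the existing validatedAt (set at payloadVerified) are both measured from the same round-zero clock, consumers of the telemetry event shown earlier can difference them. A hedged sketch of that relationship, not code from this change:

    package example

    import "time"

    // proposalTimings is hypothetical; both fields are offsets from the
    // round's zero time, mirroring the receivedAt/validatedAt fields
    // documented in agreement/proposal.go above.
    type proposalTimings struct {
        receivedAt  time.Duration // payload delivered to the agreement package
        validatedAt time.Duration // payload finished validation
    }

    // validationLag approximates how long the payload waited for and
    // spent in validation.
    func (t proposalTimings) validationLag() time.Duration {
        return t.validatedAt - t.receivedAt
    }
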
diff --git a/agreement/proposalStore.go b/agreement/proposalStore.go
index 973f909d0..e375ef92f 100644
--- a/agreement/proposalStore.go
+++ b/agreement/proposalStore.go
@@ -18,8 +18,16 @@ package agreement
import (
"fmt"
+
+ "github.com/algorand/go-algorand/util/metrics"
)
+var proposalAlreadyFilledCounter = metrics.MakeCounter(
+ metrics.MetricName{Name: "algod_agreement_proposal_already_filled", Description: "Number of times a duplicate proposal payload was received before validation"})
+
+var proposalAlreadyAssembledCounter = metrics.MakeCounter(
+ metrics.MetricName{Name: "algod_agreement_proposal_already_assembled", Description: "Number of times a duplicate proposal payload was received after validation"})
+
// An blockAssembler contains the proposal data associated with some
// proposal-value.
//
@@ -52,10 +60,12 @@ type blockAssembler struct {
// an error if the pipelining operation is redundant.
func (a blockAssembler) pipeline(p unauthenticatedProposal) (blockAssembler, error) {
if a.Assembled {
+ proposalAlreadyAssembledCounter.Inc(nil)
return a, fmt.Errorf("blockAssembler.pipeline: already assembled")
}
if a.Filled {
+ proposalAlreadyFilledCounter.Inc(nil)
return a, fmt.Errorf("blockAssembler.pipeline: already filled")
}
diff --git a/buildnumber.dat b/buildnumber.dat
index b8626c4cf..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-4
+0
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 5e86404e2..c55b3ea8d 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -41,7 +41,7 @@ const (
noPeersAvailableSleepInterval = 50 * time.Millisecond
)
-// CatchpointCatchupNodeServices defines the extenal node support needed
+// CatchpointCatchupNodeServices defines the external node support needed
// for the catchpoint service to switch the node between "regular" operational mode and catchup mode.
type CatchpointCatchupNodeServices interface {
SetCatchpointCatchupMode(bool) (newContextCh <-chan context.Context)
@@ -65,10 +65,10 @@ type CatchpointCatchupStats struct {
type CatchpointCatchupService struct {
// stats is the statistics object, updated async while downloading the ledger
stats CatchpointCatchupStats
- // statsMu syncronizes access to stats, as we could attempt to update it while querying for it's current state
+ // statsMu synchronizes access to stats, as we could attempt to update it while querying for it's current state
statsMu deadlock.Mutex
node CatchpointCatchupNodeServices
- // ctx is the node cancelation context, used when the node is being stopped.
+ // ctx is the node cancellation context, used when the node is being stopped.
ctx context.Context
cancelCtxFunc context.CancelFunc
// running is a waitgroup counting the running goroutine(1), and allow us to exit cleanly.
@@ -79,17 +79,17 @@ type CatchpointCatchupService struct {
stage ledger.CatchpointCatchupState
// log is the logger object
log logging.Logger
- // newService indicates whether this service was created after the node was running ( i.e. true ) or the node just started to find that it was previously perfoming catchup
+ // newService indicates whether this service was created after the node was running ( i.e. true ) or the node just started to find that it was previously performing catchup
newService bool
- // net is the underlaying network module
+ // net is the underlying network module
net network.GossipNode
// ledger points to the ledger object
- ledger *ledger.Ledger
+ ledger ledger.CatchupAccessorClientLedger
// lastBlockHeader is the latest block we have before going into catchpoint catchup mode. We use it to serve the node status requests instead of going to the ledger.
lastBlockHeader bookkeeping.BlockHeader
// config is a copy of the node configuration
config config.Local
- // abortCtx used as a syncronized flag to let us know when the user asked us to abort the catchpoint catchup process. note that it's not being used when we decided to abort
+	// abortCtx is used as a synchronized flag to let us know when the user asked us to abort the catchpoint catchup process. note that it's not being used when we decide to abort
// the catchup due to an internal issue ( such as exceeding number of retries )
abortCtx context.Context
abortCtxFunc context.CancelFunc
@@ -98,19 +98,20 @@ type CatchpointCatchupService struct {
}
// MakeResumedCatchpointCatchupService creates a catchpoint catchup service for a node that is already in catchpoint catchup mode
-func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
+func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, accessor ledger.CatchpointCatchupAccessor, cfg config.Local) (service *CatchpointCatchupService, err error) {
service = &CatchpointCatchupService{
stats: CatchpointCatchupStats{
StartTime: time.Now(),
},
node: node,
- ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
+ ledgerAccessor: accessor,
log: log,
newService: false,
net: net,
- ledger: l,
+ ledger: accessor.Ledger(),
config: cfg,
}
+ l := accessor.Ledger()
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
@@ -124,7 +125,7 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat
}
// MakeNewCatchpointCatchupService creates a new catchpoint catchup service for a node that is not in catchpoint catchup mode
-func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
+func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, accessor ledger.CatchpointCatchupAccessor, cfg config.Local) (service *CatchpointCatchupService, err error) {
if catchpoint == "" {
return nil, fmt.Errorf("MakeNewCatchpointCatchupService: catchpoint is invalid")
}
@@ -134,14 +135,15 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo
StartTime: time.Now(),
},
node: node,
- ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
+ ledgerAccessor: accessor,
stage: ledger.CatchpointCatchupStateInactive,
log: log,
newService: true,
net: net,
- ledger: l,
+ ledger: accessor.Ledger(),
config: cfg,
}
+ l := accessor.Ledger()
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
@@ -162,7 +164,7 @@ func (cs *CatchpointCatchupService) Start(ctx context.Context) {
func (cs *CatchpointCatchupService) Abort() {
// In order to abort the catchpoint catchup process, we need to first set the flag of abortCtxFunc, and follow that by canceling the main context.
// The order of these calls is crucial : The various stages are blocked on the main context. When that one expires, it uses the abort context to determine
- // if the cancelation meaning that we want to shut down the process, or aborting the catchpoint catchup completly.
+	// if the cancellation means that we want to shut down the process, or abort the catchpoint catchup completely.
cs.abortCtxFunc()
cs.cancelCtxFunc()
}
@@ -200,8 +202,8 @@ func (cs *CatchpointCatchupService) run() {
err = cs.processStageInactive()
case ledger.CatchpointCatchupStateLedgerDownload:
err = cs.processStageLedgerDownload()
- case ledger.CatchpointCatchupStateLastestBlockDownload:
- err = cs.processStageLastestBlockDownload()
+ case ledger.CatchpointCatchupStateLatestBlockDownload:
+ err = cs.processStageLatestBlockDownload()
case ledger.CatchpointCatchupStateBlocksDownload:
err = cs.processStageBlocksDownload()
case ledger.CatchpointCatchupStateSwitch:
@@ -258,7 +260,7 @@ func (cs *CatchpointCatchupService) processStageInactive() (err error) {
return cs.abort(fmt.Errorf("processStageInactive failed to update stage : %v", err))
}
if cs.newService {
- // we need to let the node know that it should shut down all the unneed services to avoid clashes.
+ // we need to let the node know that it should shut down all the unneeded services to avoid clashes.
cs.updateNodeCatchupMode(true)
}
return nil
@@ -272,7 +274,7 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
round, _, err0 := ledgercore.ParseCatchpointLabel(label)
if err0 != nil {
- return cs.abort(fmt.Errorf("processStageLedgerDownload failed to patse label : %v", err0))
+ return cs.abort(fmt.Errorf("processStageLedgerDownload failed to parse label : %v", err0))
}
// download balances file.
@@ -326,9 +328,9 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
cs.log.Warnf("unable to download ledger : %v", err)
}
- err = cs.updateStage(ledger.CatchpointCatchupStateLastestBlockDownload)
+ err = cs.updateStage(ledger.CatchpointCatchupStateLatestBlockDownload)
if err != nil {
- return cs.abort(fmt.Errorf("processStageLedgerDownload failed to update stage to CatchpointCatchupStateLastestBlockDownload : %v", err))
+ return cs.abort(fmt.Errorf("processStageLedgerDownload failed to update stage to CatchpointCatchupStateLatestBlockDownload : %v", err))
}
return nil
}
@@ -342,11 +344,11 @@ func (cs *CatchpointCatchupService) updateVerifiedAccounts(addedTrieHashes uint6
}
}
-// processStageLastestBlockDownload is the third catchpoint catchup stage. It downloads the latest block and verify that against the previously downloaded ledger.
-func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err error) {
+// processStageLatestBlockDownload is the third catchpoint catchup stage. It downloads the latest block and verifies it against the previously downloaded ledger.
+func (cs *CatchpointCatchupService) processStageLatestBlockDownload() (err error) {
blockRound, err := cs.ledgerAccessor.GetCatchupBlockRound(cs.ctx)
if err != nil {
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to retrieve catchup block round : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed to retrieve catchup block round : %v", err))
}
attemptsCount := 0
@@ -375,7 +377,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
// check block protocol version support.
if protoParams, ok = config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
- cs.log.Warnf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
+ cs.log.Warnf("processStageLatestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
@@ -383,24 +385,24 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
- // We need to compare explicitly the genesis hash since we're not doing any block validation. This would ensure the genesis.json file matches the block that we've receieved.
+	// We need to explicitly compare the genesis hash since we're not doing any block validation. This ensures the genesis.json file matches the block that we've received.
if protoParams.SupportGenesisHash && blk.GenesisHash() != cs.ledger.GenesisHash() {
- cs.log.Warnf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash())
+ cs.log.Warnf("processStageLatestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash())
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash()))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash()))
}
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
- cs.log.Warnf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header")
+ cs.log.Warnf("processStageLatestBlockDownload: downloaded block content does not match downloaded block header")
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
@@ -408,7 +410,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header"))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload: downloaded block content does not match downloaded block header"))
}
// verify that the catchpoint is valid.
@@ -420,15 +422,18 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
- cs.log.Infof("processStageLastestBlockDownload: block %d verification against catchpoint failed, another attempt will be made; err = %v", blockRound, err)
+ cs.log.Infof("processStageLatestBlockDownload: block %d verification against catchpoint failed, another attempt will be made; err = %v", blockRound, err)
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling VerifyCatchpoint : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling VerifyCatchpoint : %v", err))
+ }
+ if psp != nil {
+ // give a rank to the download, as the download was successful.
+		// if psp is nil, the block was retrieved from the local ledger and there is no peer to rank
+ peerRank := cs.blocksDownloadPeerSelector.peerDownloadDurationToRank(psp, blockDownloadDuration)
+ cs.blocksDownloadPeerSelector.rankPeer(psp, peerRank)
}
- // give a rank to the download, as the download was successful.
- peerRank := cs.blocksDownloadPeerSelector.peerDownloadDurationToRank(psp, blockDownloadDuration)
- cs.blocksDownloadPeerSelector.rankPeer(psp, peerRank)
err = cs.ledgerAccessor.StoreBalancesRound(cs.ctx, blk)
if err != nil {
@@ -437,7 +442,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
blk = nil
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreBalancesRound : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling StoreBalancesRound : %v", err))
}
err = cs.ledgerAccessor.StoreFirstBlock(cs.ctx, blk)
@@ -447,7 +452,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
blk = nil
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreFirstBlock : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling StoreFirstBlock : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateBlocksDownload)
@@ -457,7 +462,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
blk = nil
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to update stage : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed to update stage : %v", err))
}
// great ! everything is ready for next stage.
@@ -466,7 +471,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
return nil
}
-// lookbackForStateproofsSupport calculates the lookback (from topblock round) needed to be downloaded
+// lookbackForStateproofsSupport calculates the lookback (from topBlock round) needed to be downloaded
// in order to support state proofs verification.
func lookbackForStateproofsSupport(topBlock *bookkeeping.Block) uint64 {
proto := config.Consensus[topBlock.CurrentProtocol]
@@ -764,10 +769,10 @@ func (cs *CatchpointCatchupService) GetStatistics() (out CatchpointCatchupStats)
}
// updateBlockRetrievalStatistics updates the blocks retrieval statistics by applying the provided deltas
-func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(aquiredBlocksDelta, verifiedBlocksDelta int64) {
+func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(acquiredBlocksDelta, verifiedBlocksDelta int64) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
- cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + aquiredBlocksDelta)
+ cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + acquiredBlocksDelta)
cs.stats.VerifiedBlocks = uint64(int64(cs.stats.VerifiedBlocks) + verifiedBlocksDelta)
}
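Because both constructors now accept a ledger.CatchpointCatchupAccessor rather than a *ledger.Ledger, call sites wrap the ledger before constructing the service. A hedged sketch of an updated caller (label, node, log, net, l, cfg assumed in scope):

    accessor := ledger.MakeCatchpointCatchupAccessor(l, log)
    cs, err := catchup.MakeNewCatchpointCatchupService(label, node, log, net, accessor, cfg)
    if err != nil {
        return err
    }
    cs.Start(ctx)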
diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go
new file mode 100644
index 000000000..02f4a9b7a
--- /dev/null
+++ b/catchup/catchpointService_test.go
@@ -0,0 +1,91 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package catchup
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/components/mocks"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type catchpointCatchupLedger struct {
+}
+
+func (l *catchpointCatchupLedger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
+ blk = bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ },
+ }
+ commitments, err := blk.PaysetCommit()
+ if err != nil {
+ return blk, err
+ }
+ blk.TxnCommitments = commitments
+
+ return blk, nil
+}
+
+func (l *catchpointCatchupLedger) GenesisHash() (d crypto.Digest) {
+ return
+}
+
+func (l *catchpointCatchupLedger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) {
+ return
+}
+
+func (l *catchpointCatchupLedger) Latest() (rnd basics.Round) {
+ return
+}
+
+type catchpointCatchupAccessorMock struct {
+ mocks.MockCatchpointCatchupAccessor
+ l *catchpointCatchupLedger
+}
+
+func (m *catchpointCatchupAccessorMock) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) {
+ return 1, nil
+}
+
+func (m *catchpointCatchupAccessorMock) Ledger() (l ledger.CatchupAccessorClientLedger) {
+ return m.l
+}
+
+// TestCatchpointServicePeerRank ensures CatchpointService does not crash when ranking a peer
+// after a block was fetched from the local ledger rather than from the network
+func TestCatchpointServicePeerRank(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ l := catchpointCatchupLedger{}
+ a := catchpointCatchupAccessorMock{l: &l}
+ cs := CatchpointCatchupService{ledgerAccessor: &a, ledger: &l}
+ cs.initDownloadPeerSelector()
+
+ err := cs.processStageLatestBlockDownload()
+ require.NoError(t, err)
+}
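The mock above leans on struct embedding: the embedded mocks.MockCatchpointCatchupAccessor supplies default implementations, and the test overrides only the methods it controls. The same trick in minimal form (type name hypothetical):

    type exampleAccessorMock struct {
        mocks.MockCatchpointCatchupAccessor // embedded defaults for every interface method
    }

    // override only the method whose behavior the test pins down
    func (m *exampleAccessorMock) GetCatchupBlockRound(ctx context.Context) (basics.Round, error) {
        return 1, nil
    }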
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index afc39414d..fa965a154 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -36,14 +36,14 @@ import (
"github.com/algorand/go-algorand/util"
)
-var errNoLedgerForRound = errors.New("No ledger available for given round")
+var errNoLedgerForRound = errors.New("no ledger available for given round")
const (
// maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk.
maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk * basics.MaxEncodedAccountDataSize
// defaultMinCatchpointFileDownloadBytesPerSecond defines the worst-case scenario download speed we expect to get while downloading a catchpoint file
defaultMinCatchpointFileDownloadBytesPerSecond = 20 * 1024
- // catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each itration from the incoming http data stream
+ // catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each iteration from the incoming http data stream
catchpointFileStreamReadSize = 4096
)
@@ -114,7 +114,7 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
return fmt.Errorf("getPeerLedger error response status code %d", response.StatusCode)
}
- // at this point, we've already receieved the response headers. ensure that the
+ // at this point, we've already received the response headers. ensure that the
// response content type is what we'd like it to be.
contentTypes := response.Header["Content-Type"]
if len(contentTypes) != 1 {
diff --git a/catchup/ledgerFetcher_test.go b/catchup/ledgerFetcher_test.go
index 637064c97..4cb57d7fd 100644
--- a/catchup/ledgerFetcher_test.go
+++ b/catchup/ledgerFetcher_test.go
@@ -30,7 +30,6 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -44,8 +43,7 @@ func TestNoPeersAvailable(t *testing.T) {
partitiontest.PartitionTest(t)
lf := makeLedgerFetcher(&mocks.MockNetwork{}, &mocks.MockCatchpointCatchupAccessor{}, logging.TestingLog(t), &dummyLedgerFetcherReporter{}, config.GetDefaultLocal())
- var peer network.Peer
- peer = &lf // The peer is an opaque interface.. we can add anything as a Peer.
+	peer := &lf // The peer is an opaque interface... we can add anything as a Peer.
err := lf.downloadLedger(context.Background(), peer, basics.Round(0))
require.Equal(t, errNonHTTPPeer, err)
}
diff --git a/catchup/networkFetcher.go b/catchup/networkFetcher.go
new file mode 100644
index 000000000..d82395e8d
--- /dev/null
+++ b/catchup/networkFetcher.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package catchup
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+)
+
+// NetworkFetcher is the struct used to export the fetchBlock function from universalFetcher
+type NetworkFetcher struct {
+ log logging.Logger
+ cfg config.Local
+ auth BlockAuthenticator
+ peerSelector *peerSelector
+ fetcher *universalBlockFetcher
+}
+
+// MakeNetworkFetcher initializes a NetworkFetcher service
+func MakeNetworkFetcher(log logging.Logger, net network.GossipNode, cfg config.Local, auth BlockAuthenticator, pipelineFetch bool) *NetworkFetcher {
+ netFetcher := &NetworkFetcher{
+ log: log,
+ cfg: cfg,
+ auth: auth,
+ peerSelector: createPeerSelector(net, cfg, pipelineFetch),
+ fetcher: makeUniversalBlockFetcher(log, net, cfg),
+ }
+ return netFetcher
+}
+
+func (netFetcher *NetworkFetcher) getHTTPPeer() (network.HTTPPeer, *peerSelectorPeer, error) {
+ for retryCount := 0; retryCount < netFetcher.cfg.CatchupBlockDownloadRetryAttempts; retryCount++ {
+ psp, err := netFetcher.peerSelector.getNextPeer()
+ if err != nil {
+ if err != errPeerSelectorNoPeerPoolsAvailable {
+ err = fmt.Errorf("FetchBlock: unable to obtain a list of peers to download the block from : %w", err)
+ return nil, nil, err
+ }
+			// this is possible on startup, since the network package might not have retrieved the list of peers yet.
+ netFetcher.log.Infof("FetchBlock: unable to obtain a list of peers to download the block from; will retry shortly.")
+ time.Sleep(noPeersAvailableSleepInterval)
+ continue
+ }
+ peer := psp.Peer
+ httpPeer, ok := peer.(network.HTTPPeer)
+ if ok {
+ return httpPeer, psp, nil
+ }
+ netFetcher.log.Warnf("FetchBlock: non-HTTP peer was provided by the peer selector")
+ netFetcher.peerSelector.rankPeer(psp, peerRankInvalidDownload)
+ }
+ return nil, nil, errors.New("FetchBlock: recurring non-HTTP peer was provided by the peer selector")
+}
+
+// FetchBlock, given a round number, fetches and returns a block from an HTTP peer
+func (netFetcher *NetworkFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bookkeeping.Block,
+ *agreement.Certificate, time.Duration, error) {
+	// internal retry loop for fetching the block
+ for retryCount := 0; retryCount < netFetcher.cfg.CatchupBlockDownloadRetryAttempts; retryCount++ {
+ httpPeer, psp, err := netFetcher.getHTTPPeer()
+ if err != nil {
+ return nil, nil, time.Duration(0), err
+ }
+
+ blk, cert, downloadDuration, err := netFetcher.fetcher.fetchBlock(ctx, round, httpPeer)
+ if err != nil {
+ if ctx.Err() != nil {
+ // caller of the function decided to cancel the download
+ return nil, nil, time.Duration(0), err
+ }
+ netFetcher.log.Infof("FetchBlock: failed to download block %d on attempt %d out of %d. %v",
+ round, retryCount+1, netFetcher.cfg.CatchupBlockDownloadRetryAttempts, err)
+ netFetcher.peerSelector.rankPeer(psp, peerRankDownloadFailed)
+ continue // retry the fetch
+ }
+
+ // Check that the block's contents match the block header
+ if !blk.ContentsMatchHeader() && blk.Round() > 0 {
+ netFetcher.peerSelector.rankPeer(psp, peerRankInvalidDownload)
+ // Check if this mismatch is due to an unsupported protocol version
+ if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
+ netFetcher.log.Errorf("FetchBlock: downloaded block(%v) unsupported protocol version detected: '%v'",
+ round, blk.BlockHeader.CurrentProtocol)
+ }
+ netFetcher.log.Warnf("FetchBlock: downloaded block(%v) contents do not match header", round)
+ netFetcher.log.Infof("FetchBlock: failed to download block %d on attempt %d out of %d. %v",
+ round, retryCount+1, netFetcher.cfg.CatchupBlockDownloadRetryAttempts, err)
+ continue // retry the fetch
+ }
+
+		// Authenticate the block. For correct execution, the caller should call FetchBlock only when the lookback block is available
+ if netFetcher.cfg.CatchupVerifyCertificate() {
+ err = netFetcher.auth.Authenticate(blk, cert)
+ if err != nil {
+ netFetcher.log.Warnf("FetchBlock: cert authenticatation failed for block %d on attempt %d out of %d. %v",
+ round, retryCount+1, netFetcher.cfg.CatchupBlockDownloadRetryAttempts, err)
+ netFetcher.peerSelector.rankPeer(psp, peerRankInvalidDownload)
+ continue // retry the fetch
+ }
+ }
+
+ // upon successful download rank the peer according to the download speed
+ peerRank := netFetcher.peerSelector.peerDownloadDurationToRank(psp, downloadDuration)
+ netFetcher.peerSelector.rankPeer(psp, peerRank)
+ return blk, cert, downloadDuration, err
+
+ }
+ err := fmt.Errorf("FetchBlock failed after multiple blocks download attempts: %v unsuccessful attempts",
+ netFetcher.cfg.CatchupBlockDownloadRetryAttempts)
+ return nil, nil, time.Duration(0), err
+}
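A hedged usage sketch of the new exported API, mirroring the pattern the tests below use (net, cfg, and auth are assumed to be an initialized GossipNode, config.Local, and BlockAuthenticator; the round is arbitrary):

    fetcher := catchup.MakeNetworkFetcher(logging.Base(), net, cfg, auth, false)
    blk, cert, dur, err := fetcher.FetchBlock(context.Background(), basics.Round(1000))
    if err != nil {
        // FetchBlock gives up after cfg.CatchupBlockDownloadRetryAttempts failed attempts
        return err
    }
    _, _, _ = blk, cert, dur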
diff --git a/catchup/networkFetcher_test.go b/catchup/networkFetcher_test.go
new file mode 100644
index 000000000..7c6a2c885
--- /dev/null
+++ b/catchup/networkFetcher_test.go
@@ -0,0 +1,190 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package catchup
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFetchBlock(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ node := basicRPCNode{}
+ node.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ node.start()
+ defer node.stop()
+ rootURL := node.rootURL()
+
+ net.addPeer(rootURL)
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ block, _, duration, err := fetcher.FetchBlock(context.Background(), next)
+
+ require.NoError(t, err)
+ require.Equal(t, &b, block)
+ require.GreaterOrEqual(t, int64(duration), int64(0))
+
+ block, cert, duration, err := fetcher.FetchBlock(context.Background(), next+1)
+
+	require.Error(t, err)
+ require.Contains(t, err.Error(), "FetchBlock failed after multiple blocks download attempts")
+ require.Nil(t, block)
+ require.Nil(t, cert)
+ require.Equal(t, int64(duration), int64(0))
+}
+
+func TestConcurrentAttemptsToFetchBlockSuccess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ node := basicRPCNode{}
+ node.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ node.start()
+ defer node.stop()
+ rootURL := node.rootURL()
+
+ net.addPeer(rootURL)
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ // start is used to synchronize concurrent fetchBlock attempts
+	// parallelRequests represents the number of concurrent attempts
+ start := make(chan struct{})
+ parallelRequests := int(cfg.CatchupParallelBlocks)
+ var wg sync.WaitGroup
+ wg.Add(parallelRequests)
+ for i := 0; i < parallelRequests; i++ {
+ go func() {
+ <-start
+ block, _, duration, err := fetcher.FetchBlock(context.Background(), next)
+ require.NoError(t, err)
+ require.Equal(t, &b, block)
+ require.GreaterOrEqual(t, int64(duration), int64(0))
+ wg.Done()
+ }()
+ }
+ close(start)
+ wg.Wait()
+}
+
+func TestHTTPPeerNotAvailable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ net := &httpTestPeerSource{}
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ cfg.CatchupBlockDownloadRetryAttempts = 1
+
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ _, _, _, err := fetcher.FetchBlock(context.Background(), 1)
+ require.Contains(t, err.Error(), "recurring non-HTTP peer was provided by the peer selector")
+}
+
+func TestFetchBlockFailed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ net := &httpTestPeerSource{}
+ wsPeer := makeTestUnicastPeer(net, t)
+ net.addPeer(wsPeer.GetAddress())
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ cfg.CatchupBlockDownloadRetryAttempts = 1
+
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ _, _, _, err := fetcher.FetchBlock(context.Background(), 1)
+ require.Contains(t, err.Error(), "FetchBlock failed after multiple blocks download attempts")
+}
+
+func TestFetchBlockAuthenticationFailed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ledger, next, _, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ node := basicRPCNode{}
+ node.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ node.start()
+ defer node.stop()
+ rootURL := node.rootURL()
+
+ net.addPeer(rootURL)
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockDownloadRetryAttempts = 1
+
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, &mockedAuthenticator{errorRound: int(next)}, false)
+
+ _, _, _, err = fetcher.FetchBlock(context.Background(), next)
+ require.Contains(t, err.Error(), "FetchBlock failed after multiple blocks download attempts")
+}
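TestConcurrentAttemptsToFetchBlockSuccess above uses a closed-channel barrier so all goroutines begin at once; the pattern in isolation (a generic sketch, not PR code):

    start := make(chan struct{})
    var wg sync.WaitGroup
    for i := 0; i < parallelRequests; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            <-start // every goroutine blocks here until start is closed
            // ... run the concurrent operation ...
        }()
    }
    close(start) // releases all goroutines simultaneously
    wg.Wait()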
diff --git a/catchup/service.go b/catchup/service.go
index adc313db6..1ebaf0fd3 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -425,7 +425,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
close(completed)
}()
- peerSelector := s.createPeerSelector(true)
+ peerSelector := createPeerSelector(s.net, s.cfg, true)
if _, err := peerSelector.getNextPeer(); err == errPeerSelectorNoPeerPoolsAvailable {
s.log.Debugf("pipelinedFetch: was unable to obtain a peer to retrieve the block from")
@@ -653,7 +653,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
}
blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest
- peerSelector := s.createPeerSelector(false)
+ peerSelector := createPeerSelector(s.net, s.cfg, false)
for s.ledger.LastRound() < cert.Round {
psp, getPeerErr := peerSelector.getNextPeer()
if getPeerErr != nil {
@@ -755,11 +755,11 @@ func (s *Service) handleUnsupportedRound(nextUnsupportedRound basics.Round) {
}
}
-func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
+func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch bool) *peerSelector {
var peerClasses []peerClass
- if s.cfg.EnableCatchupFromArchiveServers {
+ if cfg.EnableCatchupFromArchiveServers {
if pipelineFetch {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers},
@@ -774,7 +774,7 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
}
} else {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
@@ -791,7 +791,7 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
} else {
if pipelineFetch {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
@@ -804,7 +804,7 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
}
} else {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
@@ -818,5 +818,5 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
}
}
- return makePeerSelector(s.net, peerClasses)
+ return makePeerSelector(net, peerClasses)
}
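Turning createPeerSelector from a method into a free function is what lets NetworkFetcher (above) and Service share it. A sketch of the shared call (net and cfg assumed in scope):

    ps := createPeerSelector(net, cfg, true) // pipelineFetch = true
    psp, err := ps.getNextPeer()
    if err == errPeerSelectorNoPeerPoolsAvailable {
        // no peers known yet; callers sleep and retry, as in NetworkFetcher.getHTTPPeer
    }
    _ = psp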
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 676a283ba..f364b7a46 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -834,7 +834,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.NetAddress = "someAddress"
s := MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps := s.createPeerSelector(true)
+ ps := createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
@@ -850,7 +850,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(true)
+ ps = createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
@@ -864,7 +864,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = "someAddress"
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -881,7 +881,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -896,7 +896,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = "someAddress"
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(true)
+ ps = createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -911,7 +911,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(true)
+ ps = createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 2, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -924,7 +924,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = "someAddress"
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -939,7 +939,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 2, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go
index 59159751b..4527c2f5e 100644
--- a/catchup/universalFetcher.go
+++ b/catchup/universalFetcher.go
@@ -187,8 +187,8 @@ func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round)
return blockCertBytes, nil
}
-// set max fetcher size to 5MB, this is enough to fit the block and certificate
-const fetcherMaxBlockBytes = 5 << 20
+// set max fetcher size to 10MB; this is enough to fit the block and certificate
+const fetcherMaxBlockBytes = 10 << 20
var errNoBlockForRound = errors.New("No block available for given round")
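For reference, the shift arithmetic behind the size bump:

    const (
        oldFetcherMax = 5 << 20  // 5 * 1048576  =  5,242,880 bytes  (5 MiB)
        newFetcherMax = 10 << 20 // 10 * 1048576 = 10,485,760 bytes (10 MiB)
    )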
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index b0a45bc6a..b67747984 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -119,7 +118,7 @@ func run() int {
}
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisPath)
+ genesisText, err := os.ReadFile(genesisPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisPath, err)
return 1
@@ -328,12 +327,17 @@ func run() int {
}
currentVersion := config.GetCurrentVersion()
+ var overrides []telemetryspec.NameValue
+ for name, val := range config.GetNonDefaultConfigValues(cfg, startupConfigCheckFields) {
+ overrides = append(overrides, telemetryspec.NameValue{Name: name, Value: val})
+ }
startupDetails := telemetryspec.StartupEventDetails{
Version: currentVersion.String(),
CommitHash: currentVersion.CommitHash,
Branch: currentVersion.Branch,
Channel: currentVersion.Channel,
InstanceHash: crypto.Hash([]byte(absolutePath)).String(),
+ Overrides: overrides,
}
log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.StartupEvent, startupDetails)
@@ -370,6 +374,30 @@ func run() int {
return 0
}
+var startupConfigCheckFields = []string{
+ "AgreementIncomingBundlesQueueLength",
+ "AgreementIncomingProposalsQueueLength",
+ "AgreementIncomingVotesQueueLength",
+ "BroadcastConnectionsLimit",
+ "CatchupBlockValidateMode",
+ "ConnectionsRateLimitingCount",
+ "ConnectionsRateLimitingWindowSeconds",
+ "GossipFanout",
+ "IncomingConnectionsLimit",
+ "IncomingMessageFilterBucketCount",
+ "IncomingMessageFilterBucketSize",
+ "LedgerSynchronousMode",
+ "MaxAcctLookback",
+ "MaxConnectionsPerIP",
+ "OutgoingMessageFilterBucketCount",
+ "OutgoingMessageFilterBucketSize",
+ "ProposalAssemblyTime",
+ "ReservedFDs",
+ "TxPoolExponentialIncreaseFactor",
+ "TxPoolSize",
+ "VerifiedTranscationsCacheSize",
+}
+
func resolveDataDir() string {
// Figure out what data directory to tell algod to use.
// If not specified on cmdline with '-d', look for default in environment.
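The override-collection pattern added above, in isolation (assuming, as the range loop implies, that GetNonDefaultConfigValues returns a map keyed by config field name):

    var overrides []telemetryspec.NameValue
    for name, val := range config.GetNonDefaultConfigValues(cfg, startupConfigCheckFields) {
        overrides = append(overrides, telemetryspec.NameValue{Name: name, Value: val})
    }
    // overrides then rides along on the StartupEvent telemetry details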
diff --git a/cmd/algod/main_test.go b/cmd/algod/main_test.go
index 13fa72092..c25505167 100644
--- a/cmd/algod/main_test.go
+++ b/cmd/algod/main_test.go
@@ -18,7 +18,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -30,7 +29,7 @@ import (
func BenchmarkAlgodStartup(b *testing.B) {
tmpDir := b.TempDir()
- genesisFile, err := ioutil.ReadFile("../../installer/genesis/devnet/genesis.json")
+ genesisFile, err := os.ReadFile("../../installer/genesis/devnet/genesis.json")
require.NoError(b, err)
dataDirectory = &tmpDir
@@ -38,7 +37,7 @@ func BenchmarkAlgodStartup(b *testing.B) {
initAndExit = &bInitAndExit
b.StartTimer()
for n := 0; n < b.N; n++ {
- err := ioutil.WriteFile(filepath.Join(tmpDir, config.GenesisJSONFile), genesisFile, 0766)
+ err := os.WriteFile(filepath.Join(tmpDir, config.GenesisJSONFile), genesisFile, 0766)
require.NoError(b, err)
fmt.Printf("file %s was written\n", filepath.Join(tmpDir, config.GenesisJSONFile))
run()
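The many ioutil changes in this PR all follow the standard Go 1.16 deprecation mapping. A self-contained reference sketch (file names hypothetical):

    package main

    import (
        "io"
        "os"
        "strings"
    )

    func ioutilMigrationExamples() error {
        data, err := os.ReadFile("in.txt") // was ioutil.ReadFile
        if err != nil {
            return err
        }
        if err := os.WriteFile("out.txt", data, 0600); err != nil { // was ioutil.WriteFile
            return err
        }
        if _, err := io.ReadAll(strings.NewReader("x")); err != nil { // was ioutil.ReadAll
            return err
        }
        dir, err := os.MkdirTemp("", "example") // was ioutil.TempDir
        if err != nil {
            return err
        }
        defer os.RemoveAll(dir)
        f, err := os.CreateTemp(dir, "example") // was ioutil.TempFile
        if err != nil {
            return err
        }
        return f.Close()
    }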
diff --git a/cmd/algofix/main.go b/cmd/algofix/main.go
index 66585e42c..09df52492 100644
--- a/cmd/algofix/main.go
+++ b/cmd/algofix/main.go
@@ -13,7 +13,7 @@ import (
"go/parser"
"go/scanner"
"go/token"
- "io/ioutil"
+ "io"
"os"
"os/exec"
"path/filepath"
@@ -135,7 +135,7 @@ func processFile(filename string, useStdin bool) error {
defer f.Close()
}
- src, err := ioutil.ReadAll(f)
+ src, err := io.ReadAll(f)
if err != nil {
return err
}
@@ -209,7 +209,7 @@ func processFile(filename string, useStdin bool) error {
}
fixedSome = true
- return ioutil.WriteFile(f.Name(), newSrc, 0)
+ return os.WriteFile(f.Name(), newSrc, 0)
}
var gofmtBuf bytes.Buffer
@@ -248,7 +248,7 @@ func isGoFile(f os.FileInfo) bool {
}
func writeTempFile(dir, prefix string, data []byte) (string, error) {
- file, err := ioutil.TempFile(dir, prefix)
+ file, err := os.CreateTemp(dir, prefix)
if err != nil {
return "", err
}
diff --git a/cmd/algofix/typecheck.go b/cmd/algofix/typecheck.go
index 4550fe4f9..2b55355a2 100644
--- a/cmd/algofix/typecheck.go
+++ b/cmd/algofix/typecheck.go
@@ -9,7 +9,6 @@ import (
"go/ast"
"go/parser"
"go/token"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -161,12 +160,12 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass
if err != nil {
return err
}
- dir, err := ioutil.TempDir(os.TempDir(), "fix_cgo_typecheck")
+ dir, err := os.MkdirTemp(os.TempDir(), "fix_cgo_typecheck")
if err != nil {
return err
}
defer os.RemoveAll(dir)
- err = ioutil.WriteFile(filepath.Join(dir, "in.go"), txt, 0600)
+ err = os.WriteFile(filepath.Join(dir, "in.go"), txt, 0600)
if err != nil {
return err
}
@@ -175,7 +174,7 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass
if err != nil {
return err
}
- out, err := ioutil.ReadFile(filepath.Join(dir, "_cgo_gotypes.go"))
+ out, err := os.ReadFile(filepath.Join(dir, "_cgo_gotypes.go"))
if err != nil {
return err
}
diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go
index a458929aa..4e75ae837 100644
--- a/cmd/algoh/main.go
+++ b/cmd/algoh/main.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"os/signal"
@@ -351,8 +350,8 @@ func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, ou
log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.ErrorOutputEvent, details)
// Write stdout & stderr streams to disk
- _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm)
- _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm)
+ _ = os.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm)
+ _ = os.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm)
}
if errorCondition && algohConfig.UploadOnError {
fmt.Fprintf(os.Stdout, "Uploading logs...\n")
diff --git a/cmd/algokey/common.go b/cmd/algokey/common.go
index 37f4e1ca4..9362fab74 100644
--- a/cmd/algokey/common.go
+++ b/cmd/algokey/common.go
@@ -18,7 +18,7 @@ package main
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
"github.com/algorand/go-algorand/crypto"
@@ -63,7 +63,7 @@ func loadMnemonic(mnemonic string) crypto.Seed {
}
func loadKeyfile(keyfile string) crypto.Seed {
- seedbytes, err := ioutil.ReadFile(keyfile)
+ seedbytes, err := os.ReadFile(keyfile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read key seed from %s: %v\n", keyfile, err)
os.Exit(1)
@@ -75,7 +75,7 @@ func loadKeyfile(keyfile string) crypto.Seed {
}
func writePrivateKey(keyfile string, seed crypto.Seed) {
- err := ioutil.WriteFile(keyfile, seed[:], 0600)
+ err := os.WriteFile(keyfile, seed[:], 0600)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write key to %s: %v\n", keyfile, err)
os.Exit(1)
@@ -84,7 +84,7 @@ func writePrivateKey(keyfile string, seed crypto.Seed) {
func writePublicKey(pubkeyfile string, checksummed string) {
data := fmt.Sprintf("%s\n", checksummed)
- err := ioutil.WriteFile(pubkeyfile, []byte(data), 0666)
+ err := os.WriteFile(pubkeyfile, []byte(data), 0666)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write public key to %s: %v\n", pubkeyfile, err)
os.Exit(1)
@@ -100,7 +100,7 @@ func computeMnemonic(seed crypto.Seed) string {
return mnemonic
}
-// writeFile is a wrapper of ioutil.WriteFile which considers the special
+// writeFile is a wrapper of os.WriteFile which considers the special
// case of stdout filename
func writeFile(filename string, data []byte, perm os.FileMode) error {
var err error
@@ -111,14 +111,14 @@ func writeFile(filename string, data []byte, perm os.FileMode) error {
}
return nil
}
- return ioutil.WriteFile(filename, data, perm)
+ return os.WriteFile(filename, data, perm)
}
-// readFile is a wrapper of ioutil.ReadFile which considers the
+// readFile is a wrapper of os.ReadFile which considers the
// special case of stdin filename
func readFile(filename string) ([]byte, error) {
if filename == stdinFileNameValue {
- return ioutil.ReadAll(os.Stdin)
+ return io.ReadAll(os.Stdin)
}
- return ioutil.ReadFile(filename)
+ return os.ReadFile(filename)
}
diff --git a/cmd/algokey/keyreg.go b/cmd/algokey/keyreg.go
index 24ddafdd1..156697e7f 100644
--- a/cmd/algokey/keyreg.go
+++ b/cmd/algokey/keyreg.go
@@ -20,7 +20,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "io/ioutil"
"os"
"strings"
@@ -75,10 +74,14 @@ func init() {
keyregCmd.Flags().Uint64Var(&params.fee, "fee", minFee, "transaction fee")
keyregCmd.Flags().Uint64Var(&params.firstValid, "firstvalid", 0, "first round where the transaction may be committed to the ledger")
- keyregCmd.MarkFlagRequired("firstvalid") // nolint:errcheck
+ if err := keyregCmd.MarkFlagRequired("firstvalid"); err != nil {
+ panic(err)
+ }
keyregCmd.Flags().Uint64Var(&params.lastValid, "lastvalid", 0, fmt.Sprintf("last round where the generated transaction may be committed to the ledger, defaults to firstvalid + %d", txnLife))
keyregCmd.Flags().StringVar(&params.network, "network", "mainnet", "the network where the provided keys will be registered, one of mainnet/testnet/betanet")
- keyregCmd.MarkFlagRequired("network") // nolint:errcheck
+ if err := keyregCmd.MarkFlagRequired("network"); err != nil {
+ panic(err)
+ }
keyregCmd.Flags().BoolVar(&params.offline, "offline", false, "set to bring an account offline")
keyregCmd.Flags().StringVarP(&params.txFile, "outputFile", "o", "", fmt.Sprintf("write signed transaction to this file, or '%s' to write to stdout", stdoutFilenameValue))
keyregCmd.Flags().StringVar(&params.partkeyFile, "keyfile", "", "participation keys to register, file is opened to fetch metadata for the transaction; only specify when bringing an account online to vote in Algorand consensus")
@@ -244,7 +247,7 @@ func run(params keyregCmdParams) error {
return fmt.Errorf("failed to write transaction to stdout: %w", err)
}
} else {
- if err = ioutil.WriteFile(params.txFile, data, 0600); err != nil {
+ if err = os.WriteFile(params.txFile, data, 0600); err != nil {
return fmt.Errorf("failed to write transaction to '%s': %w", params.txFile, err)
}
}
diff --git a/cmd/algokey/multisig.go b/cmd/algokey/multisig.go
index 7c8ae8104..b6d0bb108 100644
--- a/cmd/algokey/multisig.go
+++ b/cmd/algokey/multisig.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"strconv"
"strings"
@@ -66,14 +65,14 @@ var multisigCmd = &cobra.Command{
seed := loadKeyfileOrMnemonic(multisigKeyfile, multisigMnemonic)
key := crypto.GenerateSignatureSecrets(seed)
- txdata, err := ioutil.ReadFile(multisigTxfile)
+ txdata, err := os.ReadFile(multisigTxfile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read transactions from %s: %v\n", multisigTxfile, err)
os.Exit(1)
}
var outBytes []byte
- dec := protocol.NewDecoderBytes(txdata)
+ dec := protocol.NewMsgpDecoderBytes(txdata)
for {
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
@@ -101,7 +100,7 @@ var multisigCmd = &cobra.Command{
outBytes = append(outBytes, protocol.Encode(&stxn)...)
}
- err = ioutil.WriteFile(multisigOutfile, outBytes, 0600)
+ err = os.WriteFile(multisigOutfile, outBytes, 0600)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write signed transactions to %s: %v\n", multisigOutfile, err)
os.Exit(1)
@@ -123,7 +122,7 @@ var appendAuthAddrCmd = &cobra.Command{
}
var outBytes []byte
- dec := protocol.NewDecoderBytes(txdata)
+ dec := protocol.NewMsgpDecoderBytes(txdata)
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
diff --git a/cmd/algokey/sign.go b/cmd/algokey/sign.go
index 9afa5a3d3..14f14e58b 100644
--- a/cmd/algokey/sign.go
+++ b/cmd/algokey/sign.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/spf13/cobra"
@@ -52,14 +51,14 @@ var signCmd = &cobra.Command{
seed := loadKeyfileOrMnemonic(signKeyfile, signMnemonic)
key := crypto.GenerateSignatureSecrets(seed)
- txdata, err := ioutil.ReadFile(signTxfile)
+ txdata, err := os.ReadFile(signTxfile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read transactions from %s: %v\n", signTxfile, err)
os.Exit(1)
}
var outBytes []byte
- dec := protocol.NewDecoderBytes(txdata)
+ dec := protocol.NewMsgpDecoderBytes(txdata)
for {
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
@@ -78,7 +77,7 @@ var signCmd = &cobra.Command{
outBytes = append(outBytes, protocol.Encode(&stxn)...)
}
- err = ioutil.WriteFile(signOutfile, outBytes, 0600)
+ err = os.WriteFile(signOutfile, outBytes, 0600)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write signed transactions to %s: %v\n", signOutfile, err)
os.Exit(1)
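Both algokey commands above share the same loop over the new protocol.NewMsgpDecoderBytes decoder. A hedged sketch of that loop (the io.EOF handling is assumed from the elided context):

    dec := protocol.NewMsgpDecoderBytes(txdata)
    var outBytes []byte
    for {
        var stxn transactions.SignedTxn
        if err := dec.Decode(&stxn); err != nil {
            if err == io.EOF {
                break // all transactions consumed
            }
            break // malformed input: report and stop
        }
        // ... sign stxn here ...
        outBytes = append(outBytes, protocol.Encode(&stxn)...)
    }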
diff --git a/cmd/algons/dnsCmd.go b/cmd/algons/dnsCmd.go
index c6b6a54f8..a1771414c 100644
--- a/cmd/algons/dnsCmd.go
+++ b/cmd/algons/dnsCmd.go
@@ -20,7 +20,6 @@ import (
"bufio"
"context"
"fmt"
- "io/ioutil"
"net"
"os"
"regexp"
@@ -477,7 +476,7 @@ func doExportZone(network string, outputFilename string) bool {
return false
}
if outputFilename != "" {
- err = ioutil.WriteFile(outputFilename, exportedZone, 0666)
+ err = os.WriteFile(outputFilename, exportedZone, 0666)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to write exported zone file : %v\n", err)
return false
diff --git a/cmd/buildtools/genesis.go b/cmd/buildtools/genesis.go
index 98cb60ca6..e1aab257a 100644
--- a/cmd/buildtools/genesis.go
+++ b/cmd/buildtools/genesis.go
@@ -18,7 +18,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -102,7 +101,7 @@ var timestampCmd = &cobra.Command{
// Write out the genesis file in the same way we do to generate originally
// (see gen/generate.go)
jsonData := protocol.EncodeJSON(genesis)
- err = ioutil.WriteFile(timestampFile, append(jsonData, '\n'), 0666)
+ err = os.WriteFile(timestampFile, append(jsonData, '\n'), 0666)
if err != nil {
reportErrorf("Error saving genesis file '%s': %v\n", timestampFile, err)
}
@@ -117,7 +116,7 @@ var dumpGenesisIDCmd = &cobra.Command{
Short: "Dump the genesis ID for the specified genesis file",
Run: func(cmd *cobra.Command, args []string) {
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisFile, err)
os.Exit(1)
@@ -139,7 +138,7 @@ var dumpGenesisHashCmd = &cobra.Command{
Short: "Dump the genesis Hash for the specified genesis file",
Run: func(cmd *cobra.Command, args []string) {
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisFile, err)
os.Exit(1)
@@ -206,7 +205,7 @@ var ensureCmd = &cobra.Command{
} else {
// Write source genesis (now updated with release timestamp, if applicable)
jsonData := protocol.EncodeJSON(sourceGenesis)
- err = ioutil.WriteFile(targetFile, jsonData, 0666)
+ err = os.WriteFile(targetFile, jsonData, 0666)
if err != nil {
reportErrorf("Error writing target genesis file '%s': %v\n", targetFile, err)
}
@@ -231,13 +230,13 @@ func ensureReleaseGenesis(src bookkeeping.Genesis, releaseFile string) (err erro
releaseGenesis = src
jsonData := protocol.EncodeJSON(releaseGenesis)
- err = ioutil.WriteFile(releaseFile, jsonData, 0666)
+ err = os.WriteFile(releaseFile, jsonData, 0666)
if err != nil {
return fmt.Errorf("error saving file: %v", err)
}
hash := releaseGenesis.Hash()
- err = ioutil.WriteFile(releaseFileHash, []byte(hash.String()), 0666)
+ err = os.WriteFile(releaseFileHash, []byte(hash.String()), 0666)
if err != nil {
return fmt.Errorf("error saving hash file '%s': %v", releaseFileHash, err)
}
@@ -278,7 +277,7 @@ func verifyGenesisHashes(src, release bookkeeping.Genesis, hashFile string) (err
return fmt.Errorf("source and release hashes differ - genesis.json may have diverge from released version")
}
- relHashBytes, err := ioutil.ReadFile(hashFile)
+ relHashBytes, err := os.ReadFile(hashFile)
if err != nil {
return fmt.Errorf("error loading release hash file '%s'", hashFile)
}
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 9e8dc4561..9073ece66 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -303,7 +303,9 @@ func deleteLedgerFiles(deleteTracker bool) error {
func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitState) error {
// delete current ledger files.
- deleteLedgerFiles(true)
+ if err := deleteLedgerFiles(true); err != nil {
+ reportWarnf("Error deleting ledger files: %v", err)
+ }
cfg := config.GetDefaultLocal()
l, err := ledger.OpenLedger(logging.Base(), "./ledger", false, genesisInitState, cfg)
if err != nil {
@@ -311,7 +313,11 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
return err
}
- defer deleteLedgerFiles(!loadOnly)
+ defer func() {
+ if err := deleteLedgerFiles(!loadOnly); err != nil {
+ reportWarnf("Error deleting ledger files: %v", err)
+ }
+ }()
defer l.Close()
catchupAccessor := ledger.MakeCatchpointCatchupAccessor(l, logging.Base())
diff --git a/cmd/catchupsrv/download.go b/cmd/catchupsrv/download.go
index 813da9d03..6a5880d42 100644
--- a/cmd/catchupsrv/download.go
+++ b/cmd/catchupsrv/download.go
@@ -20,7 +20,7 @@ import (
"context"
"flag"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"os"
@@ -138,7 +138,7 @@ func fetchBlock(server string, blk uint64) error {
return fmt.Errorf("HTTP response: %s", resp.Status)
}
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -156,7 +156,7 @@ func fetchBlock(server string, blk uint64) error {
panic(err)
}
- return ioutil.WriteFile(fn, body, 0666)
+ return os.WriteFile(fn, body, 0666)
}
func fetcher(server string, wg *sync.WaitGroup) {
diff --git a/cmd/catchupsrv/main.go b/cmd/catchupsrv/main.go
index 86fd9645a..1f0de542a 100644
--- a/cmd/catchupsrv/main.go
+++ b/cmd/catchupsrv/main.go
@@ -20,7 +20,6 @@ import (
"encoding/base64"
"flag"
"fmt"
- "io/ioutil"
"math/rand"
"net/http"
"os"
@@ -118,7 +117,7 @@ func main() {
var data []byte
if *dirFlag != "" {
blkPath := blockToPath(roundNumber)
- data, err = ioutil.ReadFile(
+ data, err = os.ReadFile(
path.Join(
*dirFlag,
"v"+versionStr,
diff --git a/cmd/dbgen/main.go b/cmd/dbgen/main.go
index 078f30a3b..f73809ed7 100644
--- a/cmd/dbgen/main.go
+++ b/cmd/dbgen/main.go
@@ -21,7 +21,7 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
+ "os"
"strings"
"time"
)
@@ -59,13 +59,13 @@ func main() {
if *inputfilename == "" {
panic("error: No database schema file specified")
}
- input, err := ioutil.ReadFile(*inputfilename)
+ input, err := os.ReadFile(*inputfilename)
if err != nil {
panic(err)
}
header := ""
if *headerfilename != "" {
- headerBytes, err := ioutil.ReadFile(*headerfilename)
+ headerBytes, err := os.ReadFile(*headerfilename)
if err != nil {
panic(err)
}
@@ -78,7 +78,7 @@ func main() {
if *outputfilename == "" {
fmt.Println(payload)
} else {
- err := ioutil.WriteFile(*outputfilename, []byte(payload), 0666)
+ err := os.WriteFile(*outputfilename, []byte(payload), 0666)
if err != nil {
panic(err)
}
diff --git a/cmd/dispenser/server.go b/cmd/dispenser/server.go
index 39ecb4184..d4ec0b5b8 100644
--- a/cmd/dispenser/server.go
+++ b/cmd/dispenser/server.go
@@ -21,7 +21,7 @@ import (
"encoding/json"
"flag"
"fmt"
- "io/ioutil"
+ "io"
"log"
"net/http"
"net/url"
@@ -150,7 +150,7 @@ func (cfg dispenserSiteConfig) checkRecaptcha(remoteip, response string) (r reca
}
defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
return
}
@@ -219,7 +219,7 @@ func main() {
os.Exit(1)
}
- configText, err := ioutil.ReadFile(*configFile)
+ configText, err := os.ReadFile(*configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read config file (%s): %v\n", *configFile, err)
os.Exit(1)
@@ -237,7 +237,7 @@ func main() {
var hosts []string
for h, cfg := range configMap {
// Make a cache dir for wallet handle tokens
- cacheDir, err := ioutil.TempDir("", "dispenser")
+ cacheDir, err := os.MkdirTemp("", "dispenser")
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot make temp dir: %v\n", err)
os.Exit(1)
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 6cd3b9a87..62f1b08c8 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -20,7 +20,6 @@ import (
"bufio"
"encoding/base64"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -540,9 +539,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var createdAssets []generatedV2.Asset
if account.CreatedAssets != nil {
createdAssets = make([]generatedV2.Asset, len(*account.CreatedAssets))
- for i, asset := range *account.CreatedAssets {
- createdAssets[i] = asset
- }
+ copy(createdAssets, *account.CreatedAssets)
sort.Slice(createdAssets, func(i, j int) bool {
return createdAssets[i].Index < createdAssets[j].Index
})
@@ -551,9 +548,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var heldAssets []generatedV2.AssetHolding
if account.Assets != nil {
heldAssets = make([]generatedV2.AssetHolding, len(*account.Assets))
- for i, assetHolding := range *account.Assets {
- heldAssets[i] = assetHolding
- }
+ copy(heldAssets, *account.Assets)
sort.Slice(heldAssets, func(i, j int) bool {
return heldAssets[i].AssetId < heldAssets[j].AssetId
})
@@ -562,9 +557,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var createdApps []generatedV2.Application
if account.CreatedApps != nil {
createdApps = make([]generatedV2.Application, len(*account.CreatedApps))
- for i, app := range *account.CreatedApps {
- createdApps[i] = app
- }
+ copy(createdApps, *account.CreatedApps)
sort.Slice(createdApps, func(i, j int) bool {
return createdApps[i].Id < createdApps[j].Id
})
@@ -573,9 +566,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var optedInApps []generatedV2.ApplicationLocalState
if account.AppsLocalState != nil {
optedInApps = make([]generatedV2.ApplicationLocalState, len(*account.AppsLocalState))
- for i, appLocalState := range *account.AppsLocalState {
- optedInApps[i] = appLocalState
- }
+ copy(optedInApps, *account.AppsLocalState)
sort.Slice(optedInApps, func(i, j int) bool {
return optedInApps[i].Id < optedInApps[j].Id
})
@@ -1299,7 +1290,7 @@ var importRootKeysCmd = &cobra.Command{
}
keyDir := filepath.Join(dataDir, genID)
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return
}
@@ -1483,7 +1474,7 @@ func listParticipationKeyFiles(c *libgoal.Client) (partKeyFiles map[string]algod
// Get a list of files in the participation keys directory
keyDir := filepath.Join(c.DataDir(), genID)
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return
}
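
The four printAccountInfo hunks above replace hand-written element-copy loops with the built-in copy, which is shorter and states the intent directly. A standalone sketch:

package main

import "fmt"

func main() {
	src := []string{"asset-3", "asset-1", "asset-2"}
	dst := make([]string, len(src))
	// copy(dst, src) is equivalent to the removed loop
	// "for i, v := range src { dst[i] = v }" and returns the number of
	// elements copied (the smaller of the two slice lengths).
	n := copy(dst, src)
	fmt.Println(n, dst) // 3 [asset-3 asset-1 asset-2]
}
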
diff --git a/cmd/goal/accountsList.go b/cmd/goal/accountsList.go
index 56de35deb..dc646ffb0 100644
--- a/cmd/goal/accountsList.go
+++ b/cmd/goal/accountsList.go
@@ -19,7 +19,6 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"os/user"
"path/filepath"
@@ -184,7 +183,7 @@ func (accountList *AccountsList) getNameByAddress(address string) string {
func (accountList *AccountsList) dumpList() {
accountsListJSON, _ := json.MarshalIndent(accountList, "", " ")
accountsListJSON = append(accountsListJSON, '\n')
- err := ioutil.WriteFile(accountList.accountListFileName(), accountsListJSON, 0644)
+ err := os.WriteFile(accountList.accountListFileName(), accountsListJSON, 0644)
if err != nil {
log.Error(err.Error())
@@ -197,7 +196,7 @@ func (accountList *AccountsList) loadList() {
// First, check if the file exists.
filename := accountList.accountListFileName()
if _, err := os.Stat(filename); err == nil {
- raw, err := ioutil.ReadFile(filename)
+ raw, err := os.ReadFile(filename)
if err != nil {
log.Error(err.Error())
}
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 884c3d6e6..eaa7de91e 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -23,6 +23,7 @@ import (
"encoding/base64"
"encoding/binary"
"encoding/hex"
+ "encoding/json"
"errors"
"fmt"
"net/http"
@@ -32,9 +33,9 @@ import (
"github.com/spf13/cobra"
+ "github.com/algorand/avm-abi/abi"
"github.com/algorand/go-algorand/crypto"
apiclient "github.com/algorand/go-algorand/daemon/algod/api/client"
- "github.com/algorand/go-algorand/data/abi"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -188,9 +189,15 @@ func init() {
infoAppCmd.MarkFlagRequired("app-id")
- methodAppCmd.MarkFlagRequired("method") // nolint:errcheck // follow previous required flag format
- methodAppCmd.MarkFlagRequired("from") // nolint:errcheck
- methodAppCmd.Flags().MarkHidden("app-arg") // nolint:errcheck
+ panicIfErr(methodAppCmd.MarkFlagRequired("method"))
+ panicIfErr(methodAppCmd.MarkFlagRequired("from"))
+ panicIfErr(appCmd.PersistentFlags().MarkHidden("app-arg"))
+}
+
+func panicIfErr(err error) {
+ if err != nil {
+ panic(err)
+ }
}
type appCallArg struct {
@@ -1169,6 +1176,76 @@ func populateMethodCallReferenceArgs(sender string, currentApp uint64, types []s
return resolvedIndexes, nil
}
+// maxAppArgs is the maximum number of arguments for an application call transaction, in compliance
+// with ARC-4. Currently this is the same as the MaxAppArgs consensus parameter, but the
+// difference is that the consensus parameter is liable to change in a future consensus upgrade.
+// However, the ARC-4 ABI argument encoding **MUST** always remain the same.
+const maxAppArgs = 16
+
+// The tuple threshold is maxAppArgs, minus 1 for the method selector in the first app arg,
+// minus 1 for the final app argument becoming a tuple of the remaining method args
+const methodArgsTupleThreshold = maxAppArgs - 2
+
+// parseMethodArgJSONtoByteSlice converts input method arguments to ABI-encoded bytes.
+// It parses each entry of argTypes into an ABI type and decodes the matching JSON argument string against it.
+// If there are more than 15 method arguments, the trailing arguments are compacted into one tuple.
+func parseMethodArgJSONtoByteSlice(argTypes []string, jsonArgs []string, applicationArgs *[][]byte) error {
+ abiTypes := make([]abi.Type, len(argTypes))
+ for i, typeString := range argTypes {
+ abiType, err := abi.TypeOf(typeString)
+ if err != nil {
+ return err
+ }
+ abiTypes[i] = abiType
+ }
+
+ if len(abiTypes) != len(jsonArgs) {
+ return fmt.Errorf("input argument number %d != method argument number %d", len(jsonArgs), len(abiTypes))
+ }
+
+ // Up to 16 app arguments can be passed to an app call. The first is reserved for the
+ // method selector, and the rest are for method call arguments. But if more than 15 method
+ // call arguments are present, then the method arguments after the 14th are placed in a
+ // tuple in the last app argument slot
+ if len(abiTypes) > maxAppArgs-1 {
+ typesForTuple := make([]abi.Type, len(abiTypes)-methodArgsTupleThreshold)
+ copy(typesForTuple, abiTypes[methodArgsTupleThreshold:])
+
+ compactedType, err := abi.MakeTupleType(typesForTuple)
+ if err != nil {
+ return err
+ }
+
+ abiTypes = append(abiTypes[:methodArgsTupleThreshold], compactedType)
+
+ tupleValues := make([]json.RawMessage, len(jsonArgs)-methodArgsTupleThreshold)
+ for i, jsonArg := range jsonArgs[methodArgsTupleThreshold:] {
+ tupleValues[i] = []byte(jsonArg)
+ }
+
+ remainingJSON, err := json.Marshal(tupleValues)
+ if err != nil {
+ return err
+ }
+
+ jsonArgs = append(jsonArgs[:methodArgsTupleThreshold], string(remainingJSON))
+ }
+
+ // parse JSON value to ABI encoded bytes
+ for i := 0; i < len(jsonArgs); i++ {
+ interfaceVal, err := abiTypes[i].UnmarshalFromJSON([]byte(jsonArgs[i]))
+ if err != nil {
+ return err
+ }
+ abiEncoded, err := abiTypes[i].Encode(interfaceVal)
+ if err != nil {
+ return err
+ }
+ *applicationArgs = append(*applicationArgs, abiEncoded)
+ }
+ return nil
+}
+
var methodAppCmd = &cobra.Command{
Use: "method",
Short: "Invoke an ABI method",
@@ -1284,7 +1361,7 @@ var methodAppCmd = &cobra.Command{
basicArgValues[basicArgIndex] = strconv.Itoa(resolved)
}
- err = abi.ParseArgJSONtoByteSlice(basicArgTypes, basicArgValues, &applicationArgs)
+ err = parseMethodArgJSONtoByteSlice(basicArgTypes, basicArgValues, &applicationArgs)
if err != nil {
reportErrorf("cannot parse arguments to ABI encoding: %v", err)
}
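
To make the threshold arithmetic concrete: with maxAppArgs = 16, one slot is reserved for the method selector and one for the overflow tuple, giving methodArgsTupleThreshold = 14. An illustrative sketch of how a call with 16 method arguments is laid out (layout only; the actual encoding is done by parseMethodArgJSONtoByteSlice above):

package main

import "fmt"

func main() {
	const maxAppArgs = 16
	const methodArgsTupleThreshold = maxAppArgs - 2 // 14

	methodArgs := 16 // more than maxAppArgs-1 arguments triggers the tuple rule
	if methodArgs > maxAppArgs-1 {
		fmt.Println("app arg 1: method selector")
		fmt.Printf("app args 2-%d: method args 1-%d\n",
			methodArgsTupleThreshold+1, methodArgsTupleThreshold)
		fmt.Printf("app arg %d: tuple of method args %d-%d\n",
			maxAppArgs, methodArgsTupleThreshold+1, methodArgs)
	}
}
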
diff --git a/cmd/goal/application_test.go b/cmd/goal/application_test.go
new file mode 100644
index 000000000..7de23a5be
--- /dev/null
+++ b/cmd/goal/application_test.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseMethodArgJSONtoByteSlice(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ makeRepeatSlice := func(size int, value string) []string {
+ slice := make([]string, size)
+ for i := range slice {
+ slice[i] = value
+ }
+ return slice
+ }
+
+ tests := []struct {
+ argTypes []string
+ jsonArgs []string
+ expectedAppArgs [][]byte
+ }{
+ {
+ argTypes: []string{},
+ jsonArgs: []string{},
+ expectedAppArgs: [][]byte{},
+ },
+ {
+ argTypes: []string{"uint8"},
+ jsonArgs: []string{"100"},
+ expectedAppArgs: [][]byte{{100}},
+ },
+ {
+ argTypes: []string{"uint8", "uint16"},
+ jsonArgs: []string{"100", "65535"},
+ expectedAppArgs: [][]byte{{100}, {255, 255}},
+ },
+ {
+ argTypes: makeRepeatSlice(15, "string"),
+ jsonArgs: []string{
+ `"a"`,
+ `"b"`,
+ `"c"`,
+ `"d"`,
+ `"e"`,
+ `"f"`,
+ `"g"`,
+ `"h"`,
+ `"i"`,
+ `"j"`,
+ `"k"`,
+ `"l"`,
+ `"m"`,
+ `"n"`,
+ `"o"`,
+ },
+ expectedAppArgs: [][]byte{
+ {00, 01, 97},
+ {00, 01, 98},
+ {00, 01, 99},
+ {00, 01, 100},
+ {00, 01, 101},
+ {00, 01, 102},
+ {00, 01, 103},
+ {00, 01, 104},
+ {00, 01, 105},
+ {00, 01, 106},
+ {00, 01, 107},
+ {00, 01, 108},
+ {00, 01, 109},
+ {00, 01, 110},
+ {00, 01, 111},
+ },
+ },
+ {
+ argTypes: makeRepeatSlice(16, "string"),
+ jsonArgs: []string{
+ `"a"`,
+ `"b"`,
+ `"c"`,
+ `"d"`,
+ `"e"`,
+ `"f"`,
+ `"g"`,
+ `"h"`,
+ `"i"`,
+ `"j"`,
+ `"k"`,
+ `"l"`,
+ `"m"`,
+ `"n"`,
+ `"o"`,
+ `"p"`,
+ },
+ expectedAppArgs: [][]byte{
+ {00, 01, 97},
+ {00, 01, 98},
+ {00, 01, 99},
+ {00, 01, 100},
+ {00, 01, 101},
+ {00, 01, 102},
+ {00, 01, 103},
+ {00, 01, 104},
+ {00, 01, 105},
+ {00, 01, 106},
+ {00, 01, 107},
+ {00, 01, 108},
+ {00, 01, 109},
+ {00, 01, 110},
+ {00, 04, 00, 07, 00, 01, 111, 00, 01, 112},
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
+ applicationArgs := [][]byte{}
+ err := parseMethodArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs)
+ require.NoError(t, err)
+ require.Equal(t, test.expectedAppArgs, applicationArgs)
+ })
+ }
+}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 2be5ff332..8ab329326 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -544,7 +544,7 @@ var rawsendCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
client := ensureAlgodClient(ensureSingleDataDir())
txnIDs := make(map[transactions.Txid]transactions.SignedTxn)
@@ -673,7 +673,7 @@ var inspectCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
count := 0
for {
var txn transactions.SignedTxn
@@ -773,7 +773,7 @@ var signCmd = &cobra.Command{
}
var outData []byte
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
// read the entire file and prepare in-memory copy of each signed transaction, with grouping.
txnGroups := make(map[crypto.Digest][]*transactions.SignedTxn)
var groupsOrder []crypto.Digest
@@ -868,7 +868,7 @@ var groupCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
var stxns []transactions.SignedTxn
var group transactions.TxGroup
@@ -920,7 +920,7 @@ var splitCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
var txns []transactions.SignedTxn
for {
@@ -1120,7 +1120,7 @@ var dryrunCmd = &cobra.Command{
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
stxns := make([]transactions.SignedTxn, 0, 10)
for {
var txn transactions.SignedTxn
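
All six clerk.go call sites share the same shape: decode a msgpack stream of SignedTxn values until the input is exhausted. A sketch of that loop, assuming the usual io.EOF termination (per-command error reporting elided):

package main

import (
	"io"

	"github.com/algorand/go-algorand/data/transactions"
	"github.com/algorand/go-algorand/protocol"
)

// decodeSignedTxns sketches the loop shared by rawsend, inspect, sign,
// group, split and dryrun after the switch to NewMsgpDecoderBytes.
func decodeSignedTxns(data []byte) ([]transactions.SignedTxn, error) {
	dec := protocol.NewMsgpDecoderBytes(data)
	var txns []transactions.SignedTxn
	for {
		var txn transactions.SignedTxn
		err := dec.Decode(&txn)
		if err == io.EOF {
			return txns, nil // end of stream
		}
		if err != nil {
			return nil, err
		}
		txns = append(txns, txn)
	}
}
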
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index 4f93b6fb4..c6103d259 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"os/user"
@@ -237,7 +236,7 @@ var protoCmd = &cobra.Command{
func readGenesis(dataDir string) (genesis bookkeeping.Genesis, err error) {
path := filepath.Join(dataDir, config.GenesisJSONFile)
- genesisText, err := ioutil.ReadFile(path)
+ genesisText, err := os.ReadFile(path)
if err != nil {
return
}
@@ -564,7 +563,7 @@ func reportErrorf(format string, args ...interface{}) {
reportErrorln(fmt.Sprintf(format, args...))
}
-// writeFile is a wrapper of ioutil.WriteFile which considers the special
+// writeFile is a wrapper of os.WriteFile which considers the special
// case of stdout filename
func writeFile(filename string, data []byte, perm os.FileMode) error {
var err error
@@ -575,7 +574,7 @@ func writeFile(filename string, data []byte, perm os.FileMode) error {
}
return nil
}
- return ioutil.WriteFile(filename, data, perm)
+ return os.WriteFile(filename, data, perm)
}
// writeDryrunReqToFile creates dryrun request object and writes to a file
@@ -593,13 +592,13 @@ func writeDryrunReqToFile(client libgoal.Client, txnOrStxn interface{}, outFilen
return
}
-// readFile is a wrapper of ioutil.ReadFile which considers the
+// readFile is a wrapper of os.ReadFile which considers the
// special case of stdin filename
func readFile(filename string) ([]byte, error) {
if filename == stdinFileNameValue {
- return ioutil.ReadAll(os.Stdin)
+ return io.ReadAll(os.Stdin)
}
- return ioutil.ReadFile(filename)
+ return os.ReadFile(filename)
}
func checkTxValidityPeriodCmdFlags(cmd *cobra.Command) {
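
The readFile/writeFile wrappers above let every goal subcommand treat stdin and stdout as pseudo-files. A minimal sketch of the same pattern; the sentinel value "-" is an assumption here, standing in for goal's actual stdinFileNameValue constant, which this diff does not show:

package main

import (
	"io"
	"os"
)

// stdinSentinel is hypothetical; goal defines its own stdinFileNameValue.
const stdinSentinel = "-"

// readInput mirrors the readFile wrapper: the sentinel name means
// "read everything from stdin", anything else is a regular file.
func readInput(filename string) ([]byte, error) {
	if filename == stdinSentinel {
		return io.ReadAll(os.Stdin)
	}
	return os.ReadFile(filename)
}

func main() {
	data, err := readInput(stdinSentinel)
	if err != nil {
		os.Exit(1)
	}
	os.Stdout.Write(data)
}
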
diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go
index 6e55fcc9e..643b335d0 100644
--- a/cmd/goal/multisig.go
+++ b/cmd/goal/multisig.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/spf13/cobra"
@@ -96,7 +95,7 @@ var addSigCmd = &cobra.Command{
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
var outData []byte
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
for {
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
@@ -240,12 +239,12 @@ var mergeSigCmd = &cobra.Command{
var txnLists [][]transactions.SignedTxn
for _, arg := range args {
- data, err := ioutil.ReadFile(arg)
+ data, err := os.ReadFile(arg)
if err != nil {
reportErrorf(fileReadError, arg, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
var txns []transactions.SignedTxn
for {
var txn transactions.SignedTxn
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index 1624603e3..68654055a 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -23,7 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"os"
@@ -131,7 +131,7 @@ func getMissingCatchpointLabel(URL string) (label string, err error) {
err = errors.New(resp.Status)
return
}
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
return
}
@@ -648,7 +648,7 @@ var createCmd = &cobra.Command{
}
// copy genesis block to destination
- err = ioutil.WriteFile(destPath, genesisContent, 0644)
+ err = os.WriteFile(destPath, genesisContent, 0644)
if err != nil {
reportErrorf(errorNodeCreation, err)
}
diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go
index 3d35624a1..5ff7617d0 100644
--- a/cmd/goal/tealsign.go
+++ b/cmd/goal/tealsign.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/base32"
"encoding/base64"
- "io/ioutil"
+ "os"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -83,7 +83,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
var kdata []byte
var err error
if keyFilename != "" {
- kdata, err = ioutil.ReadFile(keyFilename)
+ kdata, err = os.ReadFile(keyFilename)
if err != nil {
reportErrorf(tealsignKeyfileFail, err)
}
@@ -123,7 +123,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
if lsigTxnFilename != "" {
// If passed a SignedTxn with a logic sig, compute
// the hash of the program within the logic sig
- stxnBytes, err := ioutil.ReadFile(lsigTxnFilename)
+ stxnBytes, err := os.ReadFile(lsigTxnFilename)
if err != nil {
reportErrorf(fileReadError, lsigTxnFilename, err)
}
@@ -159,7 +159,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
var dataToSign []byte
if dataFile != "" {
- dataToSign, err = ioutil.ReadFile(dataFile)
+ dataToSign, err = os.ReadFile(dataFile)
if err != nil {
reportErrorf(tealsignParseData, err)
}
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 25026f7f9..37e113471 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"io/fs"
- "io/ioutil"
"net/url"
"os"
"path/filepath"
@@ -66,9 +65,9 @@ func loadMnemonic(mnemonic string) crypto.Seed {
// Like shared/pingpong/accounts.go
func findRootKeys(algodDir string) []*crypto.SignatureSecrets {
keylist := make([]*crypto.SignatureSecrets, 0, 5)
- err := filepath.Walk(algodDir, func(path string, info fs.FileInfo, err error) error {
+ err := filepath.Walk(algodDir, func(path string, info fs.FileInfo, _ error) error {
var handle db.Accessor
- handle, err = db.MakeErasableAccessor(path)
+ handle, err := db.MakeErasableAccessor(path)
if err != nil {
return nil // don't care, move on
}
@@ -107,10 +106,10 @@ func main() {
if (cfg.ClientURL == nil || cfg.ClientURL.String() == "") || cfg.APIToken == "" {
if algodDir != "" {
path := filepath.Join(algodDir, "algod.net")
- net, err := ioutil.ReadFile(path)
+ net, err := os.ReadFile(path)
maybefail(err, "%s: %v\n", path, err)
path = filepath.Join(algodDir, "algod.token")
- token, err := ioutil.ReadFile(path)
+ token, err := os.ReadFile(path)
maybefail(err, "%s: %v\n", path, err)
cfg.ClientURL, err = url.Parse(fmt.Sprintf("http://%s", string(strings.TrimSpace(string(net)))))
maybefail(err, "bad net url %v\n", err)
@@ -126,8 +125,9 @@ func main() {
var publicKeys []basics.Address
addKey := func(mnemonic string) {
seed := loadMnemonic(mnemonic)
- privateKeys = append(privateKeys, crypto.GenerateSignatureSecrets(seed))
- publicKeys = append(publicKeys, basics.Address(privateKeys[0].SignatureVerifier))
+ secrets := crypto.GenerateSignatureSecrets(seed)
+ privateKeys = append(privateKeys, secrets)
+ publicKeys = append(publicKeys, basics.Address(secrets.SignatureVerifier))
}
if cfg.AccountMnemonic != "" { // one mnemonic provided
addKey(cfg.AccountMnemonic)
@@ -241,7 +241,7 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKeys
sendSize = transactionBlockSize
}
// create sendSize transaction to send.
- txns := make([]transactions.SignedTxn, sendSize, sendSize)
+ txns := make([]transactions.SignedTxn, sendSize)
for i := range txns {
tx := transactions.Transaction{
Header: transactions.Header{
@@ -289,7 +289,7 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKeys
for i := 0; i < nroutines; i++ {
totalSent += sent[i]
}
- dt := time.Now().Sub(start)
+ dt := time.Since(start)
fmt.Fprintf(os.Stdout, "sent %d/%d in %s (%.1f/s)\n", totalSent, sendSize, dt.String(), float64(totalSent)/dt.Seconds())
if cfg.TxnsToSend != 0 {
// We attempted what we were asked. We're done.
diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go
index 8837973db..725b5e5cf 100644
--- a/cmd/netgoal/generate.go
+++ b/cmd/netgoal/generate.go
@@ -39,6 +39,7 @@ var templateToGenerate string
var relaysToGenerate int
var nodesToGenerate int
var nodeHostsToGenerate int
+var nonPartnodesToGenerate int
var nonPartnodesHostsToGenerate int
var walletsToGenerate int
var nodeTemplatePath string
@@ -64,6 +65,7 @@ func init() {
generateCmd.Flags().IntVarP(&relaysToGenerate, "relays", "R", -1, "Relays to generate")
generateCmd.Flags().IntVarP(&nodeHostsToGenerate, "node-hosts", "N", -1, "Node-hosts to generate, default=nodes")
generateCmd.Flags().IntVarP(&nodesToGenerate, "nodes", "n", -1, "Nodes to generate")
+ generateCmd.Flags().IntVarP(&nonPartnodesToGenerate, "non-participating-nodes", "X", 0, "Non participating nodes to generate")
generateCmd.Flags().IntVarP(&nonPartnodesHostsToGenerate, "non-participating-nodes-hosts", "H", 0, "Non participating nodes hosts to generate")
generateCmd.Flags().StringVarP(&nodeTemplatePath, "node-template", "", "", "json for one node")
generateCmd.Flags().StringVarP(&nonParticipatingNodeTemplatePath, "non-participating-node-template", "", "", "json for non participating node")
@@ -147,7 +149,7 @@ template modes for -t:`,
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
- err = generateWalletGenesis(outputFilename, walletsToGenerate, nonPartnodesHostsToGenerate)
+ err = generateWalletGenesis(outputFilename, walletsToGenerate, nonPartnodesToGenerate)
case "net", "network", "goalnet":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
@@ -164,10 +166,10 @@ template modes for -t:`,
if templateType == "goalnet" {
err = generateNetworkGoalTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate)
} else {
- err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate, baseNode, baseNonParticipatingNode, baseRelay)
+ err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate, nonPartnodesToGenerate, baseNode, baseNonParticipatingNode, baseRelay)
}
case "otwt":
- err = generateNetworkTemplate(outputFilename, 1000, 10, 20, 100, 0, baseNode, baseNonParticipatingNode, baseRelay)
+ err = generateNetworkTemplate(outputFilename, 1000, 10, 20, 100, 0, 0, baseNode, baseNonParticipatingNode, baseRelay)
case "otwg":
err = generateWalletGenesis(outputFilename, 1000, 0)
case "ohwg":
@@ -308,7 +310,7 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
return saveGoalTemplateToDisk(template, templateFilename)
}
-func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts, nodes, npnHosts int, baseNode, baseNonPartNode, baseRelay remote.NodeConfig) error {
+func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts, nodes, npnHosts, npns int, baseNode, baseNonPartNode, baseRelay remote.NodeConfig) error {
network := remote.DeployedNetworkConfig{}
relayTemplates := unpackNodeConfig(baseRelay)
@@ -361,6 +363,7 @@ func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts
}
}
+ npnHostIndexes := make([]int, 0, npnHosts)
for i := 0; i < npnHosts; i++ {
indexID := strconv.Itoa(i + 1)
@@ -373,8 +376,18 @@ func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts
Name: "NPN" + indexID,
Nodes: []remote.NodeConfig{node},
}
+ npnHostIndexes = append(npnHostIndexes, len(network.Hosts))
network.Hosts = append(network.Hosts, host)
}
+ for i := npnHosts; i < npns; i++ {
+ hosti := npnHostIndexes[i%len(npnHostIndexes)]
+ name := "nonParticipatingNode" + strconv.Itoa(i+1)
+ node := pickNodeConfig(npnTemplates, name)
+ node.NodeNameMatchRegex = ""
+ node.FractionApply = 0.0
+ node.Name = name
+ network.Hosts[hosti].Nodes = append(network.Hosts[hosti].Nodes, node)
+ }
walletIndex := 0
for walletIndex < wallets {
@@ -400,9 +413,9 @@ func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts
}
// one wallet per NPN host to concentrate stake
- if npnHosts > 0 {
+ if npns > 0 {
walletIndex := 0
- for walletIndex < npnHosts {
+ for walletIndex < npns {
for hosti := range network.Hosts {
for nodei, node := range network.Hosts[hosti].Nodes {
if node.Name[0:4] != "nonP" {
@@ -414,11 +427,11 @@ func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts
}
network.Hosts[hosti].Nodes[nodei].Wallets = append(network.Hosts[hosti].Nodes[nodei].Wallets, wallet)
walletIndex++
- if walletIndex >= npnHosts {
+ if walletIndex >= npns {
break
}
}
- if walletIndex >= npnHosts {
+ if walletIndex >= npns {
break
}
}
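
The new --non-participating-nodes (-X) flag decouples the NPN count from the NPN host count: the first npnHosts NPNs each get a host of their own, and any remainder is spread round-robin across those hosts. A standalone sketch of the distribution:

package main

import "fmt"

func main() {
	npnHosts, npns := 3, 8
	hosts := make([][]string, npnHosts)
	// One NPN per host first, then round-robin spill-over via i % npnHosts,
	// mirroring npnHostIndexes[i%len(npnHostIndexes)] in generateNetworkTemplate.
	for i := 0; i < npns; i++ {
		name := fmt.Sprintf("nonParticipatingNode%d", i+1)
		hosts[i%npnHosts] = append(hosts[i%npnHosts], name)
	}
	for h, nodes := range hosts {
		fmt.Printf("NPN host %d: %v\n", h+1, nodes)
	}
}
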
diff --git a/cmd/nodecfg/apply.go b/cmd/nodecfg/apply.go
index 70008ff9b..77e302c7b 100644
--- a/cmd/nodecfg/apply.go
+++ b/cmd/nodecfg/apply.go
@@ -18,7 +18,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -101,7 +100,7 @@ func doApply(rootDir string, rootNodeDir, channel string, hostName string, dnsNa
// If config doesn't already exist, download it to specified root dir
if missing {
fmt.Fprintf(os.Stdout, "Configuration rootdir not specified - downloading latest version...\n")
- rootDir, err = ioutil.TempDir("", channel)
+ rootDir, err = os.MkdirTemp("", channel)
if err != nil {
return fmt.Errorf("error creating temp dir for extracting config package: %v", err)
}
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 94a394bbc..226d87a78 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -28,7 +28,7 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-var docVersion = 7
+var docVersion = 8
func opGroupMarkdownTable(names []string, out io.Writer) {
fmt.Fprint(out, `| Opcode | Description |
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index 779ed3f29..9df0052ac 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -19,12 +19,14 @@ package main
import (
"context"
"encoding/base64"
+ "encoding/json"
"fmt"
- "io/ioutil"
+ "math/rand"
"os"
"path/filepath"
"runtime/pprof"
"strconv"
+ "strings"
"time"
"github.com/spf13/cobra"
@@ -68,16 +70,21 @@ var rekey bool
var nftAsaPerSecond uint32
var pidFile string
var cpuprofile string
+var randSeed int64
+var deterministicKeys bool
+var generatedAccountsCount uint32
+var generatedAccountSampleMethod string
+var configPath string
func init() {
rootCmd.AddCommand(runCmd)
runCmd.PersistentFlags().StringVarP(&dataDir, "datadir", "d", "", "Data directory for the node")
- runCmd.Flags().StringVarP(&srcAddress, "src", "s", "", "Account address to use as funding source for new accounts)")
+ runCmd.Flags().StringVarP(&srcAddress, "src", "s", "", "Account address to use as funding source for new accounts")
runCmd.Flags().Uint32VarP(&numAccounts, "numaccounts", "n", 0, "The number of accounts to include in the transfers")
runCmd.Flags().Uint64VarP(&maxAmount, "ma", "a", 0, "The (max) amount to be transferred")
runCmd.Flags().Uint64VarP(&minAccountFunds, "minaccount", "", 0, "The minimum amount to fund a test account with")
- runCmd.Flags().Uint64VarP(&txnPerSec, "tps", "t", 200, "Number of Txn per second that pingpong sends")
+ runCmd.Flags().Uint64VarP(&txnPerSec, "tps", "t", 0, "Number of Txn per second that pingpong sends")
runCmd.Flags().Int64VarP(&maxFee, "mf", "f", -1, "The MAX fee to be used for transactions, a value of '0' tells the server to use a suggested fee.")
runCmd.Flags().Uint64VarP(&minFee, "minf", "m", 1000, "The MIN fee to be used for randomFee transactions")
runCmd.Flags().BoolVar(&randomAmount, "ra", false, "Set to enable random amounts (up to maxamount)")
@@ -88,6 +95,7 @@ func init() {
runCmd.Flags().StringVar(&runTime, "run", "", "Duration of time (seconds) to run transfers before resting (0 means non-stop)")
runCmd.Flags().StringVar(&refreshTime, "refresh", "", "Duration of time (seconds) between refilling accounts with money (0 means no refresh)")
runCmd.Flags().StringVar(&logicProg, "program", "", "File containing the compiled program to include as a logic sig")
+ runCmd.Flags().StringVar(&configPath, "config", "", "path to read config json from, or json literal")
runCmd.Flags().BoolVar(&saveConfig, "save", false, "Save the effective configuration to disk")
runCmd.Flags().BoolVar(&useDefault, "reset", false, "Reset to the default configuration (not read from disk)")
runCmd.Flags().BoolVar(&quietish, "quiet", false, "quietish stdout logging")
@@ -108,6 +116,10 @@ func init() {
runCmd.Flags().Uint32Var(&nftAsaPerSecond, "nftasapersecond", 0, "The number of NFT-style ASAs to create per second")
runCmd.Flags().StringVar(&pidFile, "pidfile", "", "path to write process id of this pingpong")
runCmd.Flags().StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
+ runCmd.Flags().Int64Var(&randSeed, "seed", 0, "input to math/rand.Seed(), defaults to time.Now().UnixNano()")
+ runCmd.Flags().BoolVar(&deterministicKeys, "deterministicKeys", false, "Draw from set of netgoal-created accounts using deterministic keys")
+ runCmd.Flags().Uint32Var(&generatedAccountsCount, "genaccounts", 0, "The total number of accounts pre-generated by netgoal")
+ runCmd.Flags().StringVar(&generatedAccountSampleMethod, "gensamplemethod", "random", "The method of sampling from the total # of pre-generated accounts")
}
var runCmd = &cobra.Command{
@@ -116,7 +128,7 @@ var runCmd = &cobra.Command{
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
// Make a cache dir for wallet handle tokens
- cacheDir, err := ioutil.TempDir("", "pingpong")
+ cacheDir, err := os.MkdirTemp("", "pingpong")
if err != nil {
reportErrorf("Cannot make temp dir: %v\n", err)
}
@@ -156,17 +168,45 @@ var runCmd = &cobra.Command{
}
// Prepare configuration
+ dataDirCfgPath := filepath.Join(ac.DataDir(), pingpong.ConfigFilename)
var cfg pingpong.PpConfig
- cfgPath := filepath.Join(ac.DataDir(), pingpong.ConfigFilename)
- if useDefault {
- cfg = pingpong.DefaultConfig
+ if configPath != "" {
+ if configPath[0] == '{' {
+ // json literal as arg
+ cfg = pingpong.DefaultConfig
+ lf := strings.NewReader(configPath)
+ dec := json.NewDecoder(lf)
+ err = dec.Decode(&cfg)
+ if err != nil {
+ reportErrorf("-config: bad config json, %v", err)
+ }
+ fmt.Fprintf(os.Stdout, "config from --config:\n")
+ cfg.Dump(os.Stdout)
+ } else {
+ cfg, err = pingpong.LoadConfigFromFile(configPath)
+ if err != nil {
+ reportErrorf("%s: bad config json, %v", configPath, err)
+ }
+ fmt.Fprintf(os.Stdout, "config from %#v:\n", configPath)
+ cfg.Dump(os.Stdout)
+ }
} else {
- cfg, err = pingpong.LoadConfigFromFile(cfgPath)
- if err != nil && !os.IsNotExist(err) {
- reportErrorf("Error loading configuration from '%s': %v\n", cfgPath, err)
+ if useDefault {
+ cfg = pingpong.DefaultConfig
+ } else {
+ cfg, err = pingpong.LoadConfigFromFile(dataDirCfgPath)
+ if err != nil && !os.IsNotExist(err) {
+ reportErrorf("Error loading configuration from '%s': %v\n", dataDirCfgPath, err)
+ }
}
}
+ if randSeed == 0 {
+ rand.Seed(time.Now().UnixNano())
+ } else {
+ rand.Seed(randSeed)
+ }
+
if srcAddress != "" {
cfg.SrcAccount = srcAddress
}
@@ -186,10 +226,12 @@ var runCmd = &cobra.Command{
cfg.MinAccountFunds = minAccountFunds
}
- if txnPerSec == 0 {
+ if txnPerSec != 0 {
+ cfg.TxnPerSec = txnPerSec
+ }
+ if cfg.TxnPerSec == 0 {
reportErrorf("cannot set tps to 0")
}
- cfg.TxnPerSec = txnPerSec
if randomFee {
if cfg.MinFee > cfg.MaxFee {
@@ -206,15 +248,15 @@ var runCmd = &cobra.Command{
if randomAmount {
cfg.RandomizeAmt = true
}
- cfg.RandomLease = randomLease
+ cfg.RandomLease = randomLease || cfg.RandomLease
if noRandomAmount {
if randomAmount {
reportErrorf("Error --ra and --nra can't both be specified\n")
}
cfg.RandomizeAmt = false
}
- cfg.RandomizeDst = randomDst
- cfg.Quiet = quietish
+ cfg.RandomizeDst = randomDst || cfg.RandomizeDst
+ cfg.Quiet = quietish || cfg.Quiet
if runTime != "" {
val, err := strconv.ParseUint(runTime, 10, 32)
if err != nil {
@@ -263,7 +305,7 @@ var runCmd = &cobra.Command{
}
if logicProg != "" {
- cfg.Program, err = ioutil.ReadFile(logicProg)
+ cfg.Program, err = os.ReadFile(logicProg)
if err != nil {
reportErrorf("Error opening logic program: %v\n", err)
}
@@ -275,17 +317,27 @@ var runCmd = &cobra.Command{
reportErrorf("Invalid group size: %v\n", groupSize)
}
- if numAsset <= 1000 {
+ if numAsset == 0 {
+ // nop
+ } else if numAsset <= 1000 {
cfg.NumAsset = numAsset
} else {
reportErrorf("Invalid number of assets: %d, (valid number: 0 - 1000)\n", numAsset)
}
- cfg.AppProgOps = appProgOps
- cfg.AppProgHashes = appProgHashes
- cfg.AppProgHashSize = appProgHashSize
+ if appProgOps != 0 {
+ cfg.AppProgOps = appProgOps
+ }
+ if appProgHashes != 0 {
+ cfg.AppProgHashes = appProgHashes
+ }
+ if appProgHashSize != "sha256" {
+ cfg.AppProgHashSize = appProgHashSize
+ }
- if numApp <= 1000 {
+ if numApp == 0 {
+ // nop
+ } else if numApp <= 1000 {
cfg.NumApp = numApp
} else {
reportErrorf("Invalid number of apps: %d, (valid number: 0 - 1000)\n", numApp)
@@ -295,7 +347,9 @@ var runCmd = &cobra.Command{
reportErrorf("Cannot opt in %d times of %d total apps\n", numAppOptIn, numApp)
}
- cfg.NumAppOptIn = numAppOptIn
+ if numAppOptIn != 0 {
+ cfg.NumAppOptIn = numAppOptIn
+ }
if appProgGlobKeys > 0 {
cfg.AppGlobKeys = appProgGlobKeys
@@ -304,10 +358,6 @@ var runCmd = &cobra.Command{
cfg.AppLocalKeys = appProgLocalKeys
}
- if numAsset != 0 && numApp != 0 {
- reportErrorf("only one of numapp and numasset may be specified\n")
- }
-
if rekey {
cfg.Rekey = rekey
if !cfg.RandomLease && !cfg.RandomNote && !cfg.RandomizeFee && !cfg.RandomizeAmt {
@@ -318,7 +368,32 @@ var runCmd = &cobra.Command{
}
}
- cfg.NftAsaPerSecond = nftAsaPerSecond
+ if nftAsaPerSecond != 0 {
+ cfg.NftAsaPerSecond = nftAsaPerSecond
+ }
+
+ if deterministicKeys && generatedAccountsCount == 0 {
+ reportErrorf("deterministicKeys requires setting generatedAccountsCount")
+ }
+ if !deterministicKeys && generatedAccountsCount > 0 {
+ reportErrorf("generatedAccountsCount requires deterministicKeys=true")
+ }
+ if deterministicKeys && numAccounts > generatedAccountsCount {
+ reportErrorf("numAccounts must be <= generatedAccountsCount")
+ }
+ cfg.DeterministicKeys = deterministicKeys || cfg.DeterministicKeys
+ if generatedAccountsCount != 0 {
+ cfg.GeneratedAccountsCount = generatedAccountsCount
+ }
+ if generatedAccountSampleMethod != "" {
+ cfg.GeneratedAccountSampleMethod = generatedAccountSampleMethod
+ }
+
+ cfg.SetDefaultWeights()
+ err = cfg.Check()
+ if err != nil {
+ reportErrorf("%v", err)
+ }
reportInfof("Preparing to initialize PingPong with config:\n")
cfg.Dump(os.Stdout)
@@ -326,20 +401,23 @@ var runCmd = &cobra.Command{
pps := pingpong.NewPingpong(cfg)
// Initialize accounts if necessary
- err = pps.PrepareAccounts(ac)
+ err = pps.PrepareAccounts(&ac)
if err != nil {
reportErrorf("Error preparing accounts for transfers: %v\n", err)
}
if saveConfig {
- cfg.Save(cfgPath)
+ err = cfg.Save(dataDirCfgPath)
+ if err != nil {
+ reportErrorf("%s: could not save config, %v\n", dataDirCfgPath, err)
+ }
}
reportInfof("Preparing to run PingPong with config:\n")
cfg.Dump(os.Stdout)
// Kick off the real processing
- pps.RunPingPong(context.Background(), ac)
+ pps.RunPingPong(context.Background(), &ac)
},
}
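
Much of the reworked runCmd follows a single rule: a flag still at its zero default no longer overwrites the loaded configuration, so values from --config survive unless explicitly overridden on the command line. A minimal sketch of the pattern, using tps as the example:

package main

import "fmt"

type ppConfig struct{ TxnPerSec uint64 }

func main() {
	cfg := ppConfig{TxnPerSec: 500} // value that came from --config
	var txnPerSec uint64            // --tps flag left at its new 0 default

	// Only an explicitly set flag overrides the loaded config...
	if txnPerSec != 0 {
		cfg.TxnPerSec = txnPerSec
	}
	// ...and the merged result is validated, whichever source it came from.
	if cfg.TxnPerSec == 0 {
		panic("cannot set tps to 0")
	}
	fmt.Println(cfg.TxnPerSec) // 500
}
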
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index c9cba4de3..fa3c5d6fc 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -73,7 +73,7 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err
}
// 3. Attempt msgp - array of transactions
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
@@ -124,7 +124,7 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord,
}
// 3. Attempt msgp - a array of records
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
for {
var record basics.BalanceRecord
err = dec.Decode(&record)
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index cce2ae75d..c0a6cd723 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"math/rand"
"net/http"
@@ -198,7 +198,7 @@ func getAppCreatorFromIndexer(indexerURL string, indexerToken string, app basics
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
- msg, _ := ioutil.ReadAll(resp.Body)
+ msg, _ := io.ReadAll(resp.Body)
return basics.Address{}, fmt.Errorf("application response error: %s, status code: %d, request: %s", string(msg), resp.StatusCode, queryString)
}
var appResp ApplicationIndexerResponse
@@ -229,7 +229,7 @@ func getBalanceFromIndexer(indexerURL string, indexerToken string, account basic
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
- msg, _ := ioutil.ReadAll(resp.Body)
+ msg, _ := io.ReadAll(resp.Body)
return basics.AccountData{}, fmt.Errorf("account response error: %s, status code: %d, request: %s", string(msg), resp.StatusCode, queryString)
}
var accountResp AccountIndexerResponse
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index 9ab3d3dec..75e437c14 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -17,7 +17,6 @@
package main
import (
- "io/ioutil"
"log"
"os"
@@ -205,7 +204,7 @@ func debugLocal(args []string) {
programNames = make([]string, len(args))
programBlobs = make([][]byte, len(args))
for i, file := range args {
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err != nil {
log.Fatalf("Error program reading %s: %s", file, err)
}
@@ -217,7 +216,7 @@ func debugLocal(args []string) {
var err error
var txnBlob []byte
if len(txnFile) > 0 {
- txnBlob, err = ioutil.ReadFile(txnFile)
+ txnBlob, err = os.ReadFile(txnFile)
if err != nil {
log.Fatalf("Error txn reading %s: %s", txnFile, err)
}
@@ -225,7 +224,7 @@ func debugLocal(args []string) {
var balanceBlob []byte
if len(balanceFile) > 0 {
- balanceBlob, err = ioutil.ReadFile(balanceFile)
+ balanceBlob, err = os.ReadFile(balanceFile)
if err != nil {
log.Fatalf("Error balance reading %s: %s", balanceFile, err)
}
@@ -233,7 +232,7 @@ func debugLocal(args []string) {
var ddrBlob []byte
if len(ddrFile) > 0 {
- ddrBlob, err = ioutil.ReadFile(ddrFile)
+ ddrBlob, err = os.ReadFile(ddrFile)
if err != nil {
log.Fatalf("Error dryrun-dump reading %s: %s", ddrFile, err)
}
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index b613cfc32..1da213ccf 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -272,7 +272,7 @@ function check_for_updater() {
# try signature validation
if [ "$GPG_VERIFY" = "1" ]; then
- local UPDATER_SIGFILE="$UPDATER_TEMPDIR/updater.sig" UPDATER_PUBKEYFILE="key.pub"
+ local UPDATER_SIGFILE="$UPDATER_TEMPDIR/updater.sig" UPDATER_PUBKEYFILE="$UPDATER_TEMPDIR/key.pub"
# try downloading public key
if curl -sSL "$UPDATER_PUBKEYURL" -o "$UPDATER_PUBKEYFILE"; then
GNUPGHOME="$(mktemp -d)"; export GNUPGHOME
diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go
index bd55d29e9..c92113d70 100644
--- a/components/mocks/mockCatchpointCatchupAccessor.go
+++ b/components/mocks/mockCatchpointCatchupAccessor.go
@@ -103,3 +103,8 @@ func (m *MockCatchpointCatchupAccessor) EnsureFirstBlock(ctx context.Context) (b
func (m *MockCatchpointCatchupAccessor) CompleteCatchup(ctx context.Context) (err error) {
return nil
}
+
+// Ledger returns ledger instance as CatchupAccessorClientLedger interface
+func (m *MockCatchpointCatchupAccessor) Ledger() (l ledger.CatchupAccessorClientLedger) {
+ return nil
+}
diff --git a/config/config.go b/config/config.go
index 023561f68..8e8dc4c27 100644
--- a/config/config.go
+++ b/config/config.go
@@ -34,6 +34,9 @@ const Devnet protocol.NetworkID = "devnet"
// Betanet identifies the 'beta network' use for early releases of feature to the public prior to releasing these to mainnet/testnet
const Betanet protocol.NetworkID = "betanet"
+// Alphanet identifies the 'alpha network' used for performance releases of feature/alphanet to the public prior to releasing these to mainnet/testnet
+const Alphanet protocol.NetworkID = "alphanet"
+
// Devtestnet identifies the 'development network for tests' use for running tests against development and not generally accessible publicly
const Devtestnet protocol.NetworkID = "devtestnet"
diff --git a/config/config_test.go b/config/config_test.go
index c11edd379..1e1915faa 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -20,12 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
+ "strings"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/protocol"
@@ -245,7 +246,7 @@ func TestConfigExampleIsCorrect(t *testing.T) {
// see their default (zero) values and instead see the
// new default because they won't exist in the old file.
func loadWithoutDefaults(cfg Local) (Local, error) {
- file, err := ioutil.TempFile("", "lwd")
+ file, err := os.CreateTemp("", "lwd")
if err != nil {
return Local{}, err
}
@@ -345,21 +346,35 @@ func TestConsensusUpgrades(t *testing.T) {
currentVersionName := protocol.ConsensusV7
latestVersionName := protocol.ConsensusCurrentVersion
- leadsTo := consensusUpgradesTo(a, currentVersionName, latestVersionName)
+ leadsTo := consensusUpgradesTo(a, currentVersionName, latestVersionName, checkConsensusVersionName)
a.True(leadsTo, "Consensus protocol must have upgrade path from %v to %v", currentVersionName, latestVersionName)
}
-func consensusUpgradesTo(a *require.Assertions, currentName, targetName protocol.ConsensusVersion) bool {
+func checkConsensusVersionName(a *require.Assertions, name string) {
+ // ensure versions come from official specs repo
+ prefix1 := "https://github.com/algorandfoundation/specs/tree/"
+ prefix2 := "https://github.com/algorand/spec/tree/"
+
+ whitelist := map[string]bool{"v7": true, "v8": true, "v9": true, "v10": true, "v11": true, "v12": true}
+ if !whitelist[name] {
+ a.True(strings.HasPrefix(name, prefix1) || strings.HasPrefix(name, prefix2),
+ "Consensus version %s does not start with allowed prefix", name)
+ }
+}
+
+func consensusUpgradesTo(a *require.Assertions, currentName, targetName protocol.ConsensusVersion, nameCheckFn func(*require.Assertions, string)) bool {
+ nameCheckFn(a, string(currentName))
if currentName == targetName {
return true
}
currentVersion, has := Consensus[currentName]
a.True(has, "Consensus map should contain all references consensus versions: Missing '%v'", currentName)
for upgrade := range currentVersion.ApprovedUpgrades {
+ nameCheckFn(a, string(upgrade))
if upgrade == targetName {
return true
}
- return consensusUpgradesTo(a, upgrade, targetName)
+ return consensusUpgradesTo(a, upgrade, targetName, nameCheckFn)
}
return false
}
@@ -537,3 +552,31 @@ func TestLocalVersionField(t *testing.T) {
expectedTag = expectedTag[:len(expectedTag)-1]
require.Equal(t, expectedTag, string(field.Tag))
}
+
+func TestGetNonDefaultConfigValues(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ cfg := GetDefaultLocal()
+
+ // set 4 non-default values
+ cfg.AgreementIncomingBundlesQueueLength = 2
+ cfg.AgreementIncomingProposalsQueueLength = 200
+ cfg.TxPoolSize = 30
+ cfg.Archival = true
+
+ // ask for 2 of them
+ ndmap := GetNonDefaultConfigValues(cfg, []string{"AgreementIncomingBundlesQueueLength", "TxPoolSize"})
+
+ // assert correct
+ expected := map[string]interface{}{
+ "AgreementIncomingBundlesQueueLength": uint64(2),
+ "TxPoolSize": int(30),
+ }
+ assert.Equal(t, expected, ndmap)
+
+ // ask for field that doesn't exist: should skip
+ assert.Equal(t, expected, GetNonDefaultConfigValues(cfg, []string{"Blah", "AgreementIncomingBundlesQueueLength", "TxPoolSize"}))
+
+ // check unmodified defaults
+ assert.Empty(t, GetNonDefaultConfigValues(GetDefaultLocal(), []string{"AgreementIncomingBundlesQueueLength", "TxPoolSize"}))
+}
diff --git a/config/consensus.go b/config/consensus.go
index 9e95d76ec..71b54daa7 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -18,7 +18,6 @@ package config
import (
"encoding/json"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -599,7 +598,7 @@ func SaveConfigurableConsensus(dataDirectory string, params ConsensusProtocols)
if err != nil {
return err
}
- err = ioutil.WriteFile(consensusProtocolPath, encodedConsensusParams, 0644)
+ err = os.WriteFile(consensusProtocolPath, encodedConsensusParams, 0644)
return err
}
@@ -1217,6 +1216,31 @@ func initConsensusProtocols() {
vFuture.LogicSigVersion = 8 // When moving this to a release, put a new higher LogicSigVersion here
Consensus[protocol.ConsensusFuture] = vFuture
+
+ // vAlphaX versions are a separate series of consensus parameters and versions for alphanet
+ vAlpha1 := v32
+ vAlpha1.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vAlpha1.AgreementFilterTimeoutPeriod0 = 2 * time.Second
+ vAlpha1.MaxTxnBytesPerBlock = 5000000
+ Consensus[protocol.ConsensusVAlpha1] = vAlpha1
+
+ vAlpha2 := vAlpha1
+ vAlpha2.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vAlpha2.AgreementFilterTimeoutPeriod0 = 3500 * time.Millisecond
+ vAlpha2.MaxTxnBytesPerBlock = 5 * 1024 * 1024
+ Consensus[protocol.ConsensusVAlpha2] = vAlpha2
+ vAlpha1.ApprovedUpgrades[protocol.ConsensusVAlpha2] = 10000
+
+ // vAlpha3 and vAlpha4 use the same parameters as v33 and v34
+ vAlpha3 := v33
+ vAlpha3.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha3] = vAlpha3
+ vAlpha2.ApprovedUpgrades[protocol.ConsensusVAlpha3] = 10000
+
+ vAlpha4 := v34
+ vAlpha4.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha4] = vAlpha4
+ vAlpha3.ApprovedUpgrades[protocol.ConsensusVAlpha4] = 10000
}
// Global defines global Algorand protocol parameters which should not be overridden.
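
The alphanet wiring above leans on a Go subtlety worth noting: each vAlphaN struct is stored in the Consensus map by value, but its ApprovedUpgrades field is itself a map, so mutating it after insertion still updates the stored copy. A standalone sketch of the upgrade chain:

package main

import "fmt"

type consensusParams struct {
	ApprovedUpgrades map[string]uint64 // next version -> upgrade delay in rounds
}

func main() {
	consensus := map[string]consensusParams{}

	vAlpha1 := consensusParams{ApprovedUpgrades: map[string]uint64{}}
	consensus["vAlpha1"] = vAlpha1

	vAlpha2 := consensusParams{ApprovedUpgrades: map[string]uint64{}}
	consensus["vAlpha2"] = vAlpha2

	// The struct was copied into the map, but both copies share the same
	// underlying ApprovedUpgrades map, so this edit is visible through
	// consensus["vAlpha1"] as well.
	vAlpha1.ApprovedUpgrades["vAlpha2"] = 10000

	fmt.Println(consensus["vAlpha1"].ApprovedUpgrades) // map[vAlpha2:10000]
}
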
diff --git a/config/defaultsGenerator/defaultsGenerator.go b/config/defaultsGenerator/defaultsGenerator.go
index 47b5ac51e..4ee4a1671 100644
--- a/config/defaultsGenerator/defaultsGenerator.go
+++ b/config/defaultsGenerator/defaultsGenerator.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -56,7 +55,7 @@ func main() {
printExit("one or more of the required input arguments was not provided\n")
}
- localDefaultsBytes, err := ioutil.ReadFile(*headerFileName)
+ localDefaultsBytes, err := os.ReadFile(*headerFileName)
if err != nil {
printExit("Unable to load file %s : %v", *headerFileName, err)
}
@@ -70,14 +69,14 @@ func main() {
localDefaultsBytes = append(localDefaultsBytes, autoDefaultsBytes...)
- err = ioutil.WriteFile(*outputfilename, localDefaultsBytes, 0644)
+ err = os.WriteFile(*outputfilename, localDefaultsBytes, 0644)
if err != nil {
printExit("Unable to write file %s : %v", *outputfilename, err)
}
// generate an update json for the example as well.
autoDefaultsBytes = []byte(prettyPrint(config.AutogenLocal, "json"))
- err = ioutil.WriteFile(*jsonExampleFileName, autoDefaultsBytes, 0644)
+ err = os.WriteFile(*jsonExampleFileName, autoDefaultsBytes, 0644)
if err != nil {
printExit("Unable to write file %s : %v", *jsonExampleFileName, err)
}
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 8a8120c5f..c5535e793 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -477,6 +477,8 @@ func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
return "devnet.algodev.network"
} else if network == Betanet {
return "betanet.algodev.network"
+ } else if network == Alphanet {
+ return "alphanet.algodev.network"
}
}
return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
diff --git a/config/migrate.go b/config/migrate.go
index 9fd9c8607..405314c62 100644
--- a/config/migrate.go
+++ b/config/migrate.go
@@ -198,3 +198,25 @@ func getVersionedDefaultLocalConfig(version uint32) (local Local) {
}
return
}
+
+// GetNonDefaultConfigValues takes a provided cfg and list of field names, and returns a map of all values in cfg
+// that are not set to the default for the latest version.
+func GetNonDefaultConfigValues(cfg Local, fieldNames []string) map[string]interface{} {
+ defCfg := GetDefaultLocal()
+ ret := make(map[string]interface{})
+
+ for _, fieldName := range fieldNames {
+ defField := reflect.ValueOf(defCfg).FieldByName(fieldName)
+ if !defField.IsValid() {
+ continue
+ }
+ cfgField := reflect.ValueOf(cfg).FieldByName(fieldName)
+ if !cfgField.IsValid() {
+ continue
+ }
+ if !reflect.DeepEqual(defField.Interface(), cfgField.Interface()) {
+ ret[fieldName] = cfgField.Interface()
+ }
+ }
+ return ret
+}
diff --git a/config/version.go b/config/version.go
index 37752c90e..20743216e 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 9
+const VersionMinor = 10
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index cce8e06d7..db8ffe642 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -53,7 +53,6 @@ const minBatchVerifierAlloc = 16
// Batch verifications errors
var (
ErrBatchVerificationFailed = errors.New("At least one signature didn't pass verification")
- ErrZeroTransactionInBatch = errors.New("Could not validate empty signature set")
)
//export ed25519_randombytes_unsafe
@@ -104,19 +103,19 @@ func (b *BatchVerifier) expand() {
b.signatures = signatures
}
-// GetNumberOfEnqueuedSignatures returns the number of signatures current enqueue onto the bacth verifier object
-func (b *BatchVerifier) GetNumberOfEnqueuedSignatures() int {
+// getNumberOfEnqueuedSignatures returns the number of signatures currently enqueued onto the batch verifier object
+func (b *BatchVerifier) getNumberOfEnqueuedSignatures() int {
return len(b.messages)
}
// Verify verifies that all the signatures are valid; in that case nil is returned
// if the batch is zero an appropriate error is returned.
func (b *BatchVerifier) Verify() error {
- if b.GetNumberOfEnqueuedSignatures() == 0 {
- return ErrZeroTransactionInBatch
+ if b.getNumberOfEnqueuedSignatures() == 0 {
+ return nil
}
- var messages = make([][]byte, b.GetNumberOfEnqueuedSignatures())
+ var messages = make([][]byte, b.getNumberOfEnqueuedSignatures())
for i, m := range b.messages {
messages[i] = HashRep(m)
}
diff --git a/crypto/batchverifier_test.go b/crypto/batchverifier_test.go
index 781a80e17..4469da400 100644
--- a/crypto/batchverifier_test.go
+++ b/crypto/batchverifier_test.go
@@ -62,7 +62,7 @@ func TestBatchVerifierBulk(t *testing.T) {
sig := sigSecrets.Sign(msg)
bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
}
- require.Equal(t, n, bv.GetNumberOfEnqueuedSignatures())
+ require.Equal(t, n, bv.getNumberOfEnqueuedSignatures())
require.NoError(t, bv.Verify())
}
@@ -122,5 +122,5 @@ func BenchmarkBatchVerifier(b *testing.B) {
func TestEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
bv := MakeBatchVerifier()
- require.Error(t, bv.Verify())
+ require.NoError(t, bv.Verify())
}
diff --git a/crypto/merklesignature/const.go b/crypto/merklesignature/const.go
index c98321b51..767f14aae 100644
--- a/crypto/merklesignature/const.go
+++ b/crypto/merklesignature/const.go
@@ -18,6 +18,7 @@ package merklesignature
import (
"fmt"
+
"github.com/algorand/go-algorand/crypto"
)
@@ -40,7 +41,7 @@ const (
var NoKeysCommitment = Commitment{}
func init() {
- // no keys generated, inner tree of merkle siganture scheme is empty.
+ // no keys generated, inner tree of merkle signature scheme is empty.
o, err := New(KeyLifetimeDefault+1, KeyLifetimeDefault+2, KeyLifetimeDefault)
if err != nil {
panic(fmt.Errorf("initializing empty merkle signature scheme failed, err: %w", err))
diff --git a/crypto/multisig.go b/crypto/multisig.go
index 53386ebc9..62ec187a2 100644
--- a/crypto/multisig.go
+++ b/crypto/multisig.go
@@ -216,33 +216,23 @@ func MultisigAssemble(unisig []MultisigSig) (msig MultisigSig, err error) {
}
// MultisigVerify verifies an assembled MultisigSig
-func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool, err error) {
+func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (err error) {
batchVerifier := MakeBatchVerifier()
- if verified, err = MultisigBatchVerify(msg, addr, sig, batchVerifier); err != nil {
+ if err = MultisigBatchPrep(msg, addr, sig, batchVerifier); err != nil {
return
}
- if !verified {
- return
- }
- if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
- return true, nil
- }
- if err = batchVerifier.Verify(); err != nil {
- return false, err
- }
- return true, nil
+ err = batchVerifier.Verify()
+ return
}
-// MultisigBatchVerify verifies an assembled MultisigSig.
-// it is the caller responsibility to call batchVerifier.verify()
-func MultisigBatchVerify(msg Hashable, addr Digest, sig MultisigSig, batchVerifier *BatchVerifier) (verified bool, err error) {
- verified = false
+// MultisigBatchPrep performs checks on the assembled MultisigSig and adds its signatures to the batch.
+// The caller must call batchVerifier.Verify() to verify it.
+func MultisigBatchPrep(msg Hashable, addr Digest, sig MultisigSig, batchVerifier *BatchVerifier) (err error) {
// short circuit: if msig doesn't have subsigs or if Subsigs are empty
// then terminate (the upper layer should now verify the unisig)
if (len(sig.Subsigs) == 0 || sig.Subsigs[0] == MultisigSubsig{}) {
- err = errInvalidNumberOfSignature
- return
+ return errInvalidNumberOfSignature
}
// check the address is correct
@@ -251,20 +241,17 @@ func MultisigBatchVerify(msg Hashable, addr Digest, sig MultisigSig, batchVerifi
return
}
if addr != addrnew {
- err = errInvalidAddress
- return
+ return errInvalidAddress
}
// check that we don't have too many multisig subsigs
if len(sig.Subsigs) > maxMultisig {
- err = errInvalidNumberOfSignature
- return
+ return errInvalidNumberOfSignature
}
// check that we don't have too few multisig subsigs
if len(sig.Subsigs) < int(sig.Threshold) {
- err = errInvalidNumberOfSignature
- return
+ return errInvalidNumberOfSignature
}
// checks the number of non-blank signatures is no less than threshold
@@ -275,27 +262,23 @@ func MultisigBatchVerify(msg Hashable, addr Digest, sig MultisigSig, batchVerifi
}
}
if counter < sig.Threshold {
- err = errInvalidNumberOfSignature
- return
+ return errInvalidNumberOfSignature
}
// checks individual signature verifies
- var verifiedCount int
+ var sigCount int
for _, subsigi := range sig.Subsigs {
if (subsigi.Sig != Signature{}) {
batchVerifier.EnqueueSignature(subsigi.Key, msg, subsigi.Sig)
- verifiedCount++
+ sigCount++
}
}
// sanity check. if we get here then every non-blank subsig should have
// been verified successfully, and we should have had enough of them
- if verifiedCount < int(sig.Threshold) {
- err = errInvalidNumberOfSignature
- return
+ if sigCount < int(sig.Threshold) {
+ return errInvalidNumberOfSignature
}
-
- verified = true
return
}
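With the rename, the split of responsibilities is explicit: `MultisigBatchPrep` only runs structural checks and enqueues subsignatures, while a single `Verify` call checks everything at once. A hedged sketch of batching several multisigs, assuming the function signatures shown in this diff; the input slices are placeholders for values produced elsewhere (e.g. via `MultisigSign` and `MultisigAssemble`):

```go
package msigsketch

import "github.com/algorand/go-algorand/crypto"

// verifyBatched checks several assembled multisigs with one batched
// ed25519 verification. The slices are assumed to be of equal length.
func verifyBatched(txids []crypto.Hashable, addrs []crypto.Digest, msigs []crypto.MultisigSig) error {
	bv := crypto.MakeBatchVerifier()
	for i := range msigs {
		// Structural checks only; valid subsignatures are enqueued.
		if err := crypto.MultisigBatchPrep(txids[i], addrs[i], msigs[i], bv); err != nil {
			return err
		}
	}
	// One cryptographic verification for everything enqueued above.
	return bv.Verify()
}
```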
diff --git a/crypto/multisig_test.go b/crypto/multisig_test.go
index 28eec2459..5035300d2 100644
--- a/crypto/multisig_test.go
+++ b/crypto/multisig_test.go
@@ -136,15 +136,13 @@ func TestMultisig(t *testing.T) {
require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk 2")
msig, err = MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: unexpected failure when assembling multisig")
- verify, err := MultisigVerify(txid, addr, msig)
+ err = MultisigVerify(txid, addr, msig)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
//test3: use the batch verification
br := MakeBatchVerifier()
- verify, err = MultisigBatchVerify(txid, addr, msig, br)
+ err = MultisigBatchPrep(txid, addr, msig, br)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
res := br.Verify()
require.NoError(t, res, "Multisig: batch verification failed")
}
@@ -200,8 +198,7 @@ func TestMultisigAddAndMerge(t *testing.T) {
require.NoError(t, err, "Multisig: unexpected failure signing with pk 2")
err = MultisigAdd(sigs, &msig1)
require.NoError(t, err, "Multisig: unexpected err adding pk 2 signature to that of pk 0 and 1")
- verify, err := MultisigVerify(txid, addr, msig1)
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
+ err = MultisigVerify(txid, addr, msig1)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
// msig2 = {sig3, sig4}
@@ -215,9 +212,8 @@ func TestMultisigAddAndMerge(t *testing.T) {
// merge two msigs and then verify
msigt, err := MultisigMerge(msig1, msig2)
require.NoError(t, err, "Multisig: unexpected failure merging multisig messages {0, 1, 2} and {3, 4}")
- verify, err = MultisigVerify(txid, addr, msigt)
+ err = MultisigVerify(txid, addr, msigt)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
// create a valid duplicate on purpose
// msig1 = {sig0, sig1, sig2}
@@ -230,9 +226,8 @@ func TestMultisigAddAndMerge(t *testing.T) {
require.NoError(t, err, "Multisig: unexpected failure adding pk 2 signature to that of pk 3 and 4")
msigt, err = MultisigMerge(msig1, msig2)
require.NoError(t, err, "Multisig: unexpected failure merging multisig messages {0, 1, 2} and {2, 3, 4}")
- verify, err = MultisigVerify(txid, addr, msigt)
+ err = MultisigVerify(txid, addr, msigt)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
return
}
@@ -254,12 +249,10 @@ func TestEmptyMultisig(t *testing.T) {
addr, err := MultisigAddrGen(version, threshold, pks)
require.NoError(t, err, "Multisig: unexpected failure generating message digest")
emptyMutliSig := MultisigSig{Version: version, Threshold: threshold, Subsigs: make([]MultisigSubsig, 0)}
- verify, err := MultisigVerify(txid, addr, emptyMutliSig)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigVerify(txid, addr, emptyMutliSig)
require.Error(t, err, "Multisig: did not return error as expected")
br := MakeBatchVerifier()
- verify, err = MultisigBatchVerify(txid, addr, emptyMutliSig, br)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigBatchPrep(txid, addr, emptyMutliSig, br)
require.Error(t, err, "Multisig: did not return error as expected")
}
@@ -282,12 +275,10 @@ func TestIncorrectAddrresInMultisig(t *testing.T) {
MutliSig, err := MultisigSign(txid, addr, version, threshold, pks, *secrets)
require.NoError(t, err, "Multisig: could not create mutlisig")
addr[0] = addr[0] + 1
- verify, err := MultisigVerify(txid, addr, MutliSig)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigVerify(txid, addr, MutliSig)
require.Error(t, err, "Multisig: did not return error as expected")
br := MakeBatchVerifier()
- verify, err = MultisigBatchVerify(txid, addr, MutliSig, br)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigBatchPrep(txid, addr, MutliSig, br)
require.Error(t, err, "Multisig: did not return error as expected")
}
@@ -321,12 +312,10 @@ func TestMoreThanMaxSigsInMultisig(t *testing.T) {
msig, err := MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: error assmeble multisig")
- verify, err := MultisigVerify(txid, addr, msig)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigVerify(txid, addr, msig)
require.Error(t, err, "Multisig: did not return error as expected")
br := MakeBatchVerifier()
- verify, err = MultisigBatchVerify(txid, addr, msig, br)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigBatchPrep(txid, addr, msig, br)
require.Error(t, err, "Multisig: did not return error as expected")
}
@@ -360,12 +349,10 @@ func TestOneSignatureIsEmpty(t *testing.T) {
msig, err := MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: error assmeble multisig")
msig.Subsigs[0].Sig = Signature{}
- verify, err := MultisigVerify(txid, addr, msig)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigVerify(txid, addr, msig)
require.Error(t, err, "Multisig: did not return error as expected")
br := MakeBatchVerifier()
- verify, err = MultisigBatchVerify(txid, addr, msig, br)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigBatchPrep(txid, addr, msig, br)
require.Error(t, err, "Multisig: did not return error as expected")
}
@@ -401,13 +388,11 @@ func TestOneSignatureIsInvalid(t *testing.T) {
sigs[1].Subsigs[1].Sig[5] = sigs[1].Subsigs[1].Sig[5] + 1
msig, err := MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: error assmeble multisig")
- verify, err := MultisigVerify(txid, addr, msig)
- require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ err = MultisigVerify(txid, addr, msig)
require.Error(t, err, "Multisig: did not return error as expected")
br := MakeBatchVerifier()
- verify, err = MultisigBatchVerify(txid, addr, msig, br)
+ err = MultisigBatchPrep(txid, addr, msig, br)
require.NoError(t, err, "Multisig: did not return error as expected")
- require.True(t, verify, "Multisig: verification succeeded, it should failed")
res := br.Verify()
require.Error(t, res, "Multisig: batch verification passed on broken signature")
@@ -455,15 +440,13 @@ func TestMultisigLessThanTrashold(t *testing.T) {
msig, err = MultisigAssemble(sigs)
require.NoError(t, err, "should be able to detect insufficient signatures for assembling")
msig.Subsigs[1].Sig = BlankSignature
- verify, err := MultisigVerify(txid, addr, msig)
- require.False(t, verify, "Multisig: verification passed, should have failed")
+ err = MultisigVerify(txid, addr, msig)
require.Error(t, err, "Multisig: expected verification failure with err")
msig, err = MultisigAssemble(sigs)
require.NoError(t, err, "should be able to detect insufficient signatures for assembling")
msig.Subsigs = msig.Subsigs[:len(msig.Subsigs)-1]
- verify, err = MultisigVerify(txid, addr, msig)
- require.False(t, verify, "Multisig: verification passed, should have failed")
+ err = MultisigVerify(txid, addr, msig)
require.Error(t, err, "Multisig: expected verification failure with err")
}
diff --git a/crypto/stateproof/coinGenerator.go b/crypto/stateproof/coinGenerator.go
index 320232fba..fa88c5770 100644
--- a/crypto/stateproof/coinGenerator.go
+++ b/crypto/stateproof/coinGenerator.go
@@ -18,9 +18,10 @@ package stateproof
import (
"encoding/binary"
- "golang.org/x/crypto/sha3"
"math/big"
+ "golang.org/x/crypto/sha3"
+
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/protocol"
)
@@ -75,7 +76,7 @@ func makeCoinGenerator(choice *coinChoiceSeed) coinGenerator {
choice.version = VersionForCoinGenerator
rep := crypto.HashRep(choice)
shk := sha3.NewShake256()
- shk.Write(rep)
+ shk.Write(rep) //nolint:errcheck // ShakeHash.Write may panic, but does not return error
threshold := prepareRejectionSamplingThreshold(choice.signedWeight)
return coinGenerator{shkContext: shk, signedWeight: choice.signedWeight, threshold: threshold}
@@ -111,7 +112,7 @@ func (cg *coinGenerator) getNextCoin() uint64 {
var randNumFromXof uint64
for {
var shakeDigest [8]byte
- cg.shkContext.Read(shakeDigest[:])
+ cg.shkContext.Read(shakeDigest[:]) //nolint:errcheck // ShakeHash.Read never returns error
randNumFromXof = binary.LittleEndian.Uint64(shakeDigest[:])
z := &big.Int{}
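Both `//nolint:errcheck` suppressions above lean on the documented contract of `golang.org/x/crypto/sha3`: a `ShakeHash` never returns a non-nil error from `Write` or `Read`. A standalone sketch of the same pattern outside the repo:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	shk := sha3.NewShake256()
	shk.Write([]byte("seed material")) //nolint:errcheck // ShakeHash.Write does not return an error

	// SHAKE256 is an XOF, so output blocks can be read repeatedly.
	var block [8]byte
	shk.Read(block[:]) //nolint:errcheck // ShakeHash.Read does not return an error
	fmt.Println(binary.LittleEndian.Uint64(block[:]))
}
```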
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index f5cd705e5..6744d6149 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -523,6 +523,60 @@
}
]
},
+ "/v2/blocks/{round}/hash": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get the block hash for the block on the given round.",
+ "operationId": "GetBlockHash",
+ "parameters": [
+ {
+ "minimum": 0,
+ "type": "integer",
+ "description": "The round from which to fetch block hash information.",
+ "name": "round",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "$ref": "#/responses/BlockHashResponse"
+ },
+ "400": {
+ "description": "Bad Request - Non integer number",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "None existing block ",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
"/v2/blocks/{round}/transactions/{txid}/proof": {
"get": {
"produces": [
@@ -3024,6 +3078,21 @@
}
}
},
+ "BlockHashResponse": {
+ "description": "Hash of a block header.",
+ "schema": {
+ "type": "object",
+ "required": [
+ "blockHash"
+ ],
+ "properties": {
+ "blockHash": {
+ "description": "Block header hash.",
+ "type": "string"
+ }
+ }
+ }
+ },
"TransactionProofResponse": {
"description": "Proof of transaction in a block.",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 50bd0494d..0a15f51bb 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -304,6 +304,25 @@
},
"description": "Asset information"
},
+ "BlockHashResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "blockHash": {
+ "description": "Block header hash.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "blockHash"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Hash of a block header."
+ },
"BlockResponse": {
"content": {
"application/json": {
@@ -2701,6 +2720,89 @@
"summary": "Get the block for the given round."
}
},
+ "/v2/blocks/{round}/hash": {
+ "get": {
+ "operationId": "GetBlockHash",
+ "parameters": [
+ {
+ "description": "The round from which to fetch block hash information.",
+ "in": "path",
+ "name": "round",
+ "required": true,
+ "schema": {
+ "minimum": 0,
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "blockHash": {
+ "description": "Block header hash.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "blockHash"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Hash of a block header."
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request - Non integer number"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "None existing block "
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get the block hash for the block on the given round."
+ }
+ },
"/v2/blocks/{round}/lightheader/proof": {
"get": {
"operationId": "GetLightBlockHeaderProof",
diff --git a/daemon/algod/api/algod2.oas2.json b/daemon/algod/api/algod2.oas2.json
deleted file mode 100644
index e69de29bb..000000000
--- a/daemon/algod/api/algod2.oas2.json
+++ /dev/null
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index b5c309756..b5d25816d 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -23,7 +23,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strings"
@@ -135,7 +134,7 @@ func extractError(resp *http.Response) error {
return nil
}
- errorBuf, _ := ioutil.ReadAll(resp.Body) // ignore returned error
+ errorBuf, _ := io.ReadAll(resp.Body) // ignore returned error
errorString := filterASCII(string(errorBuf))
if resp.StatusCode == http.StatusUnauthorized {
@@ -221,7 +220,7 @@ func (client RestClient) submitForm(response interface{}, path string, request i
return fmt.Errorf("can only decode raw response into type implementing v1.RawResponse")
}
- bodyBytes, err := ioutil.ReadAll(resp.Body)
+ bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -638,7 +637,7 @@ func (client RestClient) doGetWithQuery(ctx context.Context, path string, queryA
return
}
- bytes, err := ioutil.ReadAll(resp.Body)
+ bytes, err := io.ReadAll(resp.Body)
if err != nil {
return
}
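The `ioutil.ReadAll` to `io.ReadAll` swaps are behavior-preserving: since Go 1.16, `ioutil.ReadAll` is documented as a thin wrapper around `io.ReadAll`, which is why only the import changes. A minimal sketch of the resulting pattern:

```go
package clientutil

import (
	"io"
	"net/http"
)

// readBody drains and returns an HTTP response body using io.ReadAll,
// the non-deprecated replacement for ioutil.ReadAll.
func readBody(resp *http.Response) ([]byte, error) {
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
```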
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 921f69d94..2603a9022 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -311,162 +311,163 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka/aoc+4Yzkl+7VlXqO8VyEl0cR2Up2bvP9iUYsmcGKxJgAFDSxKf/",
- "/QoNgARJcIZ6rHKp80+2hng0Go1Gv/F5koqiFBy4VpODz5OSSlqABol/0TQVFdcJy8xfGahUslIzwScH",
- "/htRWjK+mkwnzPxaUr2eTCecFtC0Mf2nEwm/V0xCNjnQsoLpRKVrKKgZWG9K07oe6SpZicQNcWiHOD6a",
- "XG/5QLNMglJ9KH/i+YYwnuZVBkRLyhVNzSdFLpleE71mirjOhHEiOBCxJHrdakyWDPJMzfwif69AboJV",
- "usmHl3TdgJhIkUMfzteiWDAOHiqogao3hGhBMlhiozXVxMxgYPUNtSAKqEzXZCnkDlAtECG8wKticvBh",
- "ooBnIHG3UmAX+N+lBPgDEk3lCvTk0zS2uKUGmWhWRJZ27LAvQVW5VgTb4hpX7AI4Mb1m5MdKabIAQjl5",
- "/+1r8uzZs1dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeYrISnPkrr9+29f4/ynboFjW1GlIH5YDs0X",
- "cnw0tADfMUJCjGtY4T60qN/0iByK5ucFLIWEkXtiG9/rpoTz/6m7klKdrkvBuI7sC8GvxH6O8rCg+zYe",
- "VgPQal8aTEkz6Ie95NWnz/vT/b3rv304TP7L/fni2fXI5b+ux92BgWjDtJISeLpJVhIonpY15X18vHf0",
+ "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka/ar8uOGM5NeuVbX1nWI5WV0cR2Up2bvP9iUYsmcGKxJgAFDSxKf/",
+ "/QoNgARJcIZ6rHKp80+2hng0Go1Gv/FlkoqiFBy4VpODL5OSSlqABol/0TQVFdcJy8xfGahUslIzwScH",
+ "/htRWjK+mkwnzPxaUr2eTCecFtC0Mf2nEwm/VUxCNjnQsoLpRKVrKKgZWG9K07oe6SpZicQNcWiHOD6a",
+ "XG/5QLNMglJ9KH/k+YYwnuZVBkRLyhVNzSdFLpleE71mirjOhHEiOBCxJHrdakyWDPJMzfwif6tAboJV",
+ "usmHl3TdgJhIkUMfzjeiWDAOHiqogao3hGhBMlhiozXVxMxgYPUNtSAKqEzXZCnkDlAtECG8wKticvBx",
+ "ooBnIHG3UmAX+N+lBPgdEk3lCvTk8zS2uKUGmWhWRJZ27LAvQVW5VgTb4hpX7AI4Mb1m5IdKabIAQjn5",
+ "8O0b8vz589dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeYrISnPkrr9h2/f4PynboFjW1GlIH5YDs0X",
+ "cnw0tADfMUJCjGtY4T60qN/0iByK5ucFLIWEkXtiG9/rpoTz/6G7klKdrkvBuI7sC8GvxH6O8rCg+zYe",
+ "VgPQal8aTEkz6Me95PXnL/vT/b3rv3w8TP7L/fny+fXI5b+px92BgWjDtJISeLpJVhIonpY15X18fHD0",
"oNaiyjOyphe4+bRAVu/6EtPXss4LmleGTlgqxWG+EopQR0YZLGmVa+InJhXPDZsyozlqJ0yRUooLlkE2",
"Ndz3cs3SNUmpskNgO3LJ8tzQYKUgG6K1+Oq2HKbrECUGrlvhAxf0/y4ymnXtwARcITdI0lwoSLTYcT35",
"G4fyjIQXSnNXqZtdVuRsDQQnNx/sZYu444am83xDNO5rRqgilPiraUrYkmxERS5xc3J2jv3dagzWCmKQ",
- "hpvTukfN4R1CXw8ZEeQthMiBckSeP3d9lPElW1USFLlcg167O0+CKgVXQMTiX5Bqs+3/4/Snd0RI8iMo",
- "RVdwQtNzAjwV2fAeu0ljN/i/lDAbXqhVSdPz+HWds4JFQP6RXrGiKgivigVIs1/+ftCCSNCV5EMA2RF3",
- "0FlBr/qTnsmKp7i5zbQtQc2QElNlTjczcrwkBb36em/qwFGE5jkpgWeMr4i+4oNCmpl7N3iJFBXPRsgw",
- "2mxYcGuqElK2ZJCRepQtkLhpdsHD+M3gaSSrABw/yCA49Sw7wOFwFaEZc3TNF1LSFQQkMyM/O86FX7U4",
- "B14zOLLY4KdSwgUTlao7DcCIU28Xr7nQkJQSlixCY6cOHYZ72DaOvRZOwEkF15RxyAznRaCFBsuJBmEK",
- "JtyuzPSv6AVV8PL50AXefB25+0vR3fWtOz5qt7FRYo9k5F40X92BjYtNrf4jlL9wbsVWif25t5FsdWau",
- "kiXL8Zr5l9k/j4ZKIRNoIcJfPIqtONWVhIOP/In5iyTkVFOeUZmZXwr7049VrtkpW5mfcvvTW7Fi6Slb",
- "DSCzhjWqTWG3wv5jxouzY30VVRreCnFeleGC0pZWutiQ46OhTbZj3pQwD2tVNtQqzq68pnHTHvqq3sgB",
- "IAdxV1LT8Bw2Egy0NF3iP1dLpCe6lH+Yf8oyN711uYyh1tCxu2/RNuBsBodlmbOUGiS+d5/NV8MEwGoJ",
- "tGkxxwv14HMAYilFCVIzOygtyyQXKc0TpanGkf5DwnJyMPnbvDGuzG13NQ8mf2t6nWInI49aGSehZXmD",
- "MU6MXKO2MAvDoPETsgnL9lAiYtxuoiElZlhwDheU61mjj7T4QX2AP7iZGnxbUcbiu6NfDSKc2IYLUFa8",
- "tQ0fKRKgniBaCaIVpc1VLhb1D18dlmWDQfx+WJYWHygaAkOpC66Y0uoxLp82Jymc5/hoRr4Lx0Y5W/B8",
- "Yy4HK2qYu2Hpbi13i9WGI7eGZsRHiuB2CjkzW+PRYGT4+6A41BnWIjdSz05aMY2/d21DMjO/j+r81yCx",
- "ELfDxIValMOcVWDwl0Bz+apDOX3CcbacGTns9r0d2ZhR4gRzK1rZup923C14rFF4KWlpAXRf7F3KOGpg",
- "tpGF9Y7cdCSji8IcnOGA1hCqW5+1nechCgmSQgeGb3KRnt/DeV+YcfrHDocna6AZSJJRTYNz5c5L/M7G",
- "jt9jP+QIICOC/U/4H5oT89kQvuGLdlijsDOkXxGY1zOj51rp2c5kGqD+LUhhVVtiVNIbQfm6mbzHIyxa",
- "xvCIN1abJtjDL8IsvbGVHS6EvB29dAiBk8YCSKgZNTgu087OYtOqTBx+IlYE26AzUON06QuTIYa6w8dw",
- "1cLCqab/BiwoM+p9YKE90H1jQRQly+EezuuaqnV/EUate/aUnH5/+GL/6a9PX7w0ekkpxUrSgiw2GhT5",
- "yknTROlNDo/7K0N5tsp1fPSXz73dqD1ubBwlKplCQcv+UNYeZS8t24yYdn2stdGMq64BHHMsz8CwF4t2",
- "Yk2tBrQjpsydWCzuZTOGEJY1s2TEQZLBTmK66fKaaTbhEuVGVvehfICUQkYsInjEtEhFnlyAVExEjNsn",
- "rgVxLbxAUnZ/t9CSS6qImRuNdRXPQM5ilKWvOILGNBRq14Vqhz674g1u3IBUSrrpod+uN7I6N++YfWkj",
- "39t+FClBJvqKkwwW1aoluy6lKAglGXbEi+MtW611cI+eSCGW9y5uRGeJLQk/oIGd5KaPu+msbIAAvxMZ",
- "GEWpUvfA3pvBGuwZyglxRhei0oQSLjJArapSccY/4JpDnwC6MnR4l+i1FSwWYCT4lFZmtVVJ0FDfo8Wm",
- "Y0JTS0UJokYNWDJrE7RtZaezbp9cAs2MZA+ciIUzFzpDJi6SopdBe9bprp2IrtOCq5QiBaWMRmbl7J2g",
- "+XaWLPUWPCHgCHA9C1GCLKm8JbBaaJrvABTbxMCt5URnY+1DPW76bRvYnTzcRiqNUmapwAil5sDloGEI",
- "hSNxcgESbY3/1v3zk9x2+6pyIBLAiVZnrEDdjlMuFKSCZyo6WE6VTnYdW9OoJf+ZFQQnJXZSceAB+8Jb",
- "qrS1ODOeoS5g2Q3OYw0PZophgAevQDPyL/7264+dGj7JVaXqq1BVZSmkhiy2Bg5XW+Z6B1f1XGIZjF3f",
- "t1qQSsGukYewFIzvkGVXYhFEdW2YcS6Z/uLQfGHugU0UlS0gGkRsA+TUtwqwG3pDBwAximPdEwmHqQ7l",
- "1C7Y6URpUZbm/Omk4nW/ITSd2taH+uembZ+4qG74eibAzK49TA7yS4tZ6wdfUyO048ikoOfmbkIR3JrG",
- "+zCbw5goxlNItlG+OZanplV4BHYc0gHtx0XaBLN1DkeHfqNEN0gEO3ZhaMEDqtgJlZqlrERJ4gfY3Ltg",
- "1Z0gatIhGWjKjHoQfLBCVhn2J9bX0R3zdoLWKKm5D35PbI4sJ2cKL4w28OewQdvuiXWinwWu93uQFCOj",
- "mtNNOUFAvWvOXMhhE7iiqc435prTa9iQS5BAVLUomNY2KqItSGpRJuEAUYvElhmdTcg6oP0OjDFSneJQ",
- "wfL6WzGdWLFlO3xnHcGlhQ4nMJVC5CNs5z1kRCEYZVsnpTC7zlwQjo/U8JTUAtIJMWgQrJnnI9VCM66A",
- "/C9RkZRyFMAqDfWNICSyWbx+zQzmAqvndFb0BkOQQwFWrsQvT550F/7kidtzpsgSLn3kmmnYRceTJ6gl",
- "nQilW4frHlR0c9yOI7wdTTXmonAyXJenzHbaItzIY3bypDN4bd8xZ0opR7hm+XdmAJ2TeTVm7SGNrKla",
- "7147jjvKChMMHVs37ju6EP89OnwzdAy6/sSB46X5OOR7MfJVvrkHPm0HIhJKCQpPVaiXKPtVLMPgRnfs",
- "1EZpKPqqve3664Bg896LBT0pU/CccUgKwWETjednHH7Ej7He9mQPdEYeO9S3Kza14O+A1Z5nDBXeFb+4",
- "2wEpn9ROx3vY/O64HatOGNaJWinkJaEkzRnqrIIrLatUf+QUpeLgLEdM/V7WH9aTXvsmccUsoje5oT5y",
- "qgwOa1k5ap5cQkQL/hbAq0uqWq1A6Y58sAT4yF0rxknFmca5CrNfid2wEiTa22e2ZUE3ZElzVOv+ACnI",
- "otLtGxOjz5Q2Wpc1MZlpiFh+5FSTHIwG+iPjZ1c4nA/y8jTDQV8KeV5jYRY9DyvgoJhK4i6J7+zX76la",
- "++Wbhp5Jus7WiGLGb0LUNhpa4e3/+6v/PPhwmPwXTf7YS179t/mnz8+vHz/p/fj0+uuv/0/7p2fXXz/+",
- "z/+I7ZSHPRYb5SA/PnLS5PERigyNcakH+4NZHArGkyiRna2BFIxjiG2HtshXRvDxBPS4MVO5Xf/I9RU3",
- "hHRBc5ZRfTty6LK43lm0p6NDNa2N6CiQfq2fYi7dlUhKmp6jR2+yYnpdLWapKOZeip6vRC1RzzMKheD4",
- "LZvTks1VCen8Yn/HlX4HfkUi7KrDZG8tEPT9gfF4RjRZuhBFPHnLiluiqJQzUmK4jvfLiOW0jlm1uWoH",
- "BAMa19Q7Fd2fT1+8nEybQMT6u9HU7ddPkTPBsqtYuGkGVzFJzR01PGKPFCnpRoGO8yGEPeqCsn6LcNgC",
- "jIiv1qx8eJ6jNFvEeeX3jjE6je+KH3MbgGFOIppnN87qI5YPD7eWABmUeh3LYWnJHNiq2U2AjkullOIC",
- "+JSwGcy6Gle2AuWdYTnQJeZSoIlRjAnqqs+BJTRPFQHWw4WMUmti9INisuP719OJEyPUvUv2buAYXN05",
- "a1us/1sL8ui7N2dk7livemQjn+3QQaxqxJLhwrFazjbDzWzmng39/sg/8iNYMs7M94OPPKOazhdUsVTN",
- "KwXyG5pTnsJsJciBj/A6opp+5D2ZbTC5NoitI2W1yFlKzkPZuiFPmzDVH+Hjxw+G43/8+KnnuelLwm6q",
- "KH+xEySXTK9FpROXEZJIuKQyi4Cu6owAHNnmc22bdUrc2JYVu4wTN36c59GyVN3I4P7yyzI3yw/IULm4",
- "V7NlRGkhvVRjRB0LDe7vO+EuBkkvfTpRpUCR3wpafmBcfyLJx2pv7xmQVqjsb054MDS5KaFl87pV5HLX",
- "3oULtxoSXGlJk5KuQEWXr4GWuPsoeRdoXc1zgt1aIbo+oAWHahbg8TG8ARaOG4cb4uJObS+f2htfAn7C",
- "LcQ2RtxonBa33a8gaPfW29UJ/O3tUqXXiTnb0VUpQ+J+Z+qMv5URsrwnSbEVN4fAJUcugKRrSM8hwzwt",
- "KEq9mba6e2elE1k962DK5jPaqEJMukHz4AJIVWbUCfWUb7rZDwq09ikf7+EcNmeiydm5SbpDO/peDR1U",
- "pNRAujTEGh5bN0Z3853jGyOOy9IHsWPApieLg5oufJ/hg2xF3ns4xDGiaEWHDyGCyggiLPEPoOAWCzXj",
- "3Yn0Y8sz+srC3nyR9EfP+4lr0qhhznkdrgaD3u33AjA5WlwqsqBGbhcur9dGmAdcrFJ0BQMScmihHRnH",
- "3bLq4iC77r3oTSeW3Qutd99EQbaNE7PmKKWA+WJIBZWZTsiCn8k6AXAFM4LlOhzCFjmKSXW0hGU6VLYs",
- "5bb+wBBocQIGyRuBw4PRxkgo2ayp8inHmJntz/IoGeDfmDGxLU/uOPC2B+nXdRac57ndc9rTLl22nE+R",
- "83lxoWo5IsfNSPgYABbbDsFRAMogh5VduG3sCaXJ3mg2yMDx03KZMw4kiTnuqVIiZTZnvLlm3Bxg5OMn",
- "hFhjMhk9QoyMA7DRuYUDk3ciPJt8dRMgucs+oX5sdIsFf0M87NKGZhmRR5SGhTM+EFTnOQB10R71/dWJ",
- "OcJhCONTYtjcBc0Nm3MaXzNIL10LxdZOcpZzrz4eEme32PLtxXKjNdmr6DarCWUmD3RcoNsC8XZRIrYF",
- "CvHlbFk1robu0jFTD1zfQ7j6Kkj0uhUAHU2/KYnkNL+dGlr7bu7fZA1LnzYJzD6qNEb7Q/QT3aUB/PVN",
- "EHVq1kn3uo4q6W23azsrLZCfYqzYnJG+r6PvUVGQA0rESUuCSM5jHjAj2AOy21PfLdDcMfeN8s3jwJcv",
- "YcWUhsYWbW4l71x5aNscxZR7IZbDq9OlXJr1vRei5tE2pxM7tpb54Cu4EBqSJZNKJ2jIjy7BNPpWoUb5",
- "rWkaFxTa0QK2+gzL4rwBpz2HTZKxvIrTq5v3hyMz7bvaCKOqxTlsUBwEmq7JAqslRWOItkxtw8y2Lvit",
- "XfBbem/rHXcaTFMzsTTk0p7jL3IuOpx3GzuIEGCMOPq7NojSLQwSL/4jyHUsPSwQGuzhzEzD2TbTY+8w",
- "ZX7sndEXForhO8qOFF1LoC1vXQVDH4lR95gOig31Ux4GzgAtS5ZddQyBdtRBdZHeSNv3WdwdLODuusF2",
- "YCAw+sWiaiWodsJ+I93aslE8XNtsFGbO2mn1IUMIp2LKFz3sI8qQNlbm2oWrM6D5D7D5xbTF5Uyup5O7",
- "2Q1juHYj7sD1Sb29UTyjh9vakVpugBuinJalFBc0T5x1dYg0pbhwpInNvTH2gVld3IZ39ubw7YkD/3o6",
- "SXOgMqlFhcFVYbvyL7MqWxtg4ID4ompG4fEyuxUlg82vc7ZDi+zlGlwBq0Aa7VXaaKztwVF0FtplPNBm",
- "p73VOQbsErc4CKCs/QON7cq6B9ouAXpBWe6NRh7agaAYXNy4ci1RrhAOcGfXQuAhSu6V3fROd/x0NNS1",
- "gyeFc20psVXYKnKKCN71HxsREm1RSKoFxToZ1iTQZ068KhJz/BKVszRuYOQLZYiDW8eRaUyw8YAwakas",
- "2IAfklcsGMs0UyMU3Q6QwRxRZPqaK0O4WwhX/rfi7PcKCMuAa/NJ4qnsHFQsTOJMzf3r1MgO/bncwNY8",
- "3Qx/FxkjrBHTvfEQiO0CRuim6oF7VKvMfqG1Ocb8ENjjb+DtDmfsXYlbPNWOPhw12xjAddvdFFbr7fM/",
- "Qxi2stvuUsFeeXXFagbmiJb+ZSpZSvEHxPU8VI8jIfe+Kg7DEI8/gM8imUtdFlNbd5oKxs3sg9s9JN2E",
- "Vqi2h36A6nHnA58UViDx5lnK7VbbSpytQK84wYTBmXM7fkMwDuZeQGtOLxc0Vp7FCBkGpsPG+9kyJGtB",
- "fGePe2fzZq5Q0YwEjtS6LbPJaCXIJhumn/h8S4HBTjtaVGgkA6TaUCaYWudXrkRkmIpfUm4Lupp+9ii5",
- "3gqs8cv0uhQSU0lV3OadQcoKmsclhwyx3069zdiK2XKmlYKgXqYbyNaBtlTkao5a/3KDmuMl2ZsGFXnd",
- "bmTsgim2yAFb7NsWC6qQk9eGqLqLWR5wvVbY/OmI5uuKZxIyvVYWsUqQWqhD9ab23CxAXwJwsoft9l+R",
- "r9BnpdgFPDZYdPfz5GD/FRpd7R97sQvA1S3exk0yZCf/dOwkTsfotLNjGMbtRp1FEyNtsflhxrXlNNmu",
- "Y84StnS8bvdZKiinK4iHSRQ7YLJ9cTfRkNbBC89spWSlpdgQpuPzg6aGPw0EcRv2Z8EgqSgKpgvn2VCi",
- "MPTUFMO0k/rhbNllV6rJw+U/ooOw9P6RjhL5sEZTe7/FVo1u3He0gDZap4Ta/OGcNa57X12NHPsqBFi7",
- "qi5ZZXFj5jJLRzEHPflLUkrGNSoWlV4m/yDpmkqaGvY3GwI3Wbx8HqnX1S7Rw28G+IPjXYICeRFHvRwg",
- "ey9DuL7kKy54UhiOkj1ukiaCUznoyYxHi3mO3g0W3D70WKHMjJIMklvVIjcacOo7ER7fMuAdSbFez43o",
- "8cYre3DKrGScPGhldujn92+dlFEIGatJ0xx3J3FI0JLBBQauxTfJjHnHvZD5qF24C/R/rufBi5yBWObP",
- "ckwR+KZiefZLkwTWKXkoKU/XUbv/wnT8talMXS/ZnuNoCZQ15Rzy6HD2zvzV362R2/9fYuw8BeMj23ZL",
- "GdrldhbXAN4G0wPlJzToZTo3E4RYbWfF1FGX+UpkBOdp6m00VNavzhiUK/u9AqVjGQb4wUZ+oH3H6AW2",
- "WhYBnqFUPSPf2Zdl1kBa5QBQmmVFldvUcshWIJ3hsSpzQbMpMeOcvTl8S+ysto+tr2qrda1QmGuvoqPX",
- "B8V5xsUQ+lKp8fjm8eNsD7g0q1Yaq3MoTYsylotmWpz5BpjwFto6UcwLsTMjR1bCVl5+s5MYelgyWRjJ",
- "tB7N8nikCfMfrWm6RtG1xU2GSX58mTlPlSooxl8X1a3r6+C5M3C7SnO20NyUCKNfXDJlHxSBC2inv9W5",
- "oE518ulw7eXJinNLKVEevS1X+TZo98BZh7Y3h0Yh6yD+hoKLrdJ406p7p9grWrCiW8KvV4XfpkDV9WD9",
- "Q1Ep5YKzFMtFBE+Y1CC7x0nG+ApGVNboGqP8EXcnNHK4ooUD63Aih8XBUoKeETrE9Y2VwVezqZY67J8a",
- "X8FYU01WoJXjbJBNff1LZy9hXIGrl4Tv1AR8UsiW/wU5ZNSll9Sm3xuSEcbODwjA35pv75x6hEGl54yj",
- "IOTQ5uJXrUUD307QRnpimqwEKLeedgKh+mD6zDCXLoOrTzP/1gKOYd0XZtnWV9cf6tB77pynzLR9bdra",
- "ygnNz60wRTvpYVm6SYero0blAX3FBxEc8cAk3gQeILcePxxtC7ltdbnjfWoIDS7QYQcl3sM9wqgrhXZK",
- "I1/QvLIUhS2IDXWJJkwzHgHjLePQvAQSuSDS6JWAG4PndaCfSiXVVgQcxdPOgObopYsxNKWdifauQ3U2",
- "GFGCa/RzDG9jU+R0gHHUDRrBjfJN/QCJoe5AmHiNLx85RPZLlqJU5YSoDMOOO0VMY4zDMG5fJrl9AfSP",
- "QV8mst21pPbk3OQmGsokW1TZCnRCsyxWaO4b/ErwK8kqlBzgCtKqLtRVliTFEgztmhR9anMTpYKrqtgy",
- "l29wx+lSEZOj3+EEysdVN4PPCLJfw3qP3py8f/P68OzNkb0vjFpuU8mMzC2hMAzR6LFKgxGdKwXktxCN",
- "v2G/3zoLjoMZFC+OEG1YQNkTIgbULzb4b6yY1jABOZ/6jaO6vAMdO95YvG+P1BPOzdFLFFsl4zGBV9/d",
- "0dFMfbvz2PS/1wOZi1UbkAdOc9/GjMM9irHhN+Z+C7PAexXi7A1YJ2ljDJXw7yCgdlunF7aZJ964vZJx",
- "aLuvS9pvt54MF6ef4h09EEkZJPdTKwZYZ9BQPGU6GP5LtcvC0ZRs5ZRYUT42gg3GsJXs7VuYUUPYUACG",
- "jb8wn3u9xwmwPXUAx96KUB/Z0wfoBx82SErKnKezYRZ9zLoA437I95jQw2aDu4twYbs4SGwl8Qrhw3U2",
- "mtoaeA2UQrGmqmWsdPjIsJIzrP4d1Anpj+V9uheQaiPUB74qCXCTqiFmsuChgy/1NgbUjzr6xpXZ2FZb",
- "o1+/dAez6WUABFkstvbjbHwlicM6IgH9pPjUwAq4e2ugHds7OsJwuYRUs4sdGRf/NFpqE80/9Xqsfcgm",
- "SMBgdcSaf333hup1A9C2hIit8AT1p+4MzlC89TlsHinSooZoMcqp53m3SVRGDCB3SAyJCBXz+FnDm3PC",
- "MFVTBmLBe9htd2hKvgxWAQ/yh245lydJQsOcoi1TXoiY5j5qLtP1Rpl2GHw1lJTRr8M7LAgdYdljVb/g",
- "UD+vG2g15LhfDurSJUpjfkxta/Yp06D8bz4Zzs5in21u6pSjZf+Sysy3iKqqXgtOttxHvUwKX0O2C/Sy",
- "npk18VD92PlIgRGMektzoRhfJUOhg+0QpPDJN3S04nWABY4RriVI9z6B9q9iJ1r4+KltcGxDhXue7DZI",
- "UINFvSxwg6n275taAlgmkdo30Z0TOVyg0VupgU4GGf/Dc25D9mv73QeL+zJ5IzRyR6/JzpR9HwnHVA+J",
- "IdUvibstdweh30brZZzb92pULP2fG1SG1uNSiqxK7QUdHozGxjC2uMYWVhJVGNP+Knuyf46lZt4GKT3n",
- "sJlb+TtdU97U/GkfaytC2TUEKbSd3b5Xg0Bc98lXdgGre4Hzz1Sqp5NSiDwZMBcf96sYdM/AOUvPISPm",
- "7vAxJAOVwMlXaKWs/YGX643P2i9L4JA9nhFi1PKi1BvvGmwX5OxMzh/pbfNf4axZZQuLOH1/9pHHw5+w",
- "5Ie8I3/zw2znagoM87vjVHaQHWUCrgYqKEh6GamLP/atxoizrlurvCEqC0VMSrllzuio893X+SOkHxTr",
- "3q79hCnlPuszFdKajlBa8gadrvDyY2MRGlc23HfYAV6oFAeFwz03cuD8yTFCP9ZICZYySAmt5e/Ss/0T",
- "ozVfCrZIYQSyWaayBcNEX6gMjCjqdW2biOO5b8LA/HHBsaZG3/Sh0JSIdSlDwjHnUl7Q/OHNF1hY4BDx",
- "4V6/iS801H9DJFtUqttFK7ylo+YOdN37m5qfoLnln2D2KGoDdkM5O2pdsN3XmcP6STQnuWgebsAhySWO",
- "aY3G+y/JwkWklhJSplgnWP/Sl8yr1T2sINs8irRdv9y1zl+EvgMZOwVBlORdU35LC7wfGgibI/onM5WB",
- "kxul8hj19cgigr8YjwpTQ3dcF+cta7ItZ9iJ5hAS7tmqHLixb2hV7ie9jl0ergMvnUpBf52jb+sWbiMX",
- "dbO2sS6RPnKHPRl6McaTES+9ZrqjK8UiBOsWEgSV/Lb/G5GwxMLkgjx5ghM8eTJ1TX972v5sjvOTJ1Ex",
- "7sGcKK33g928MYr5ZSj6z0a4DQSadvajYnm2izBaYcPNIwEYGPurC7D+U54p+NXaU/tH1RV4von7trsJ",
- "iJjIWluTB1MFAcEjYoFdt1n0hWcFaSWZ3mDetze/sV+j9XS+qy32zuNTZwq6u0+Lc6grBzT2/Ur52/U7",
- "YV+ELoxMjc5zjS9GvbmiRZmDOyhfP1r8HZ7943m292z/74t/7L3YS+H5i1d7e/TVc7r/6tk+PP3Hi+d7",
- "sL98+WrxNHv6/Oni+dPnL1+8Sp893188f/nq748MHzIgW0AnPsto8j/xLY/k8OQ4OTPANjihJasfijNk",
- "7MuI0xRPIhSU5ZMD/9N/9ydsloqiGd7/OnFJDJO11qU6mM8vLy9nYZf5Cg16iRZVup77efoPdJ0c1wHW",
- "NjEWd9TGzhpSwE11pHCI396/OT0jhyfHs4ZgJgeTvdnebB+f3ymB05JNDibP8Cc8PWvc97kjtsnB5+vp",
- "ZL4GmqP/y/xRgJYs9Z/UJV2tQM5cPXXz08XTuRcl5p+dMfN627d5WJpw/rll88129MTqbfPPPil5e+tW",
- "1q+zdQcdRkIxPKV9gnb+GUXZwd/nqDVYcpx7H0W8ZQvgz/qKZdfdHu7Rx/nn5hXWa3tec4h5JGxoPg0e",
- "bZ0SpgldCImJuTpdmyPqMwKZaj/aW9PbcWbozPR6Xb9IGxRDOvjQE7jtQMSPhIfSUFxzZlozNWxRywrC",
- "+jw102+1b1j/h73k1afP+9P9veu/Gdbu/nzx7Hqka/F186Dtac23Rzb8hOl0aCTFo/R0b+8O7zUd8vB1",
- "Xdyk4Fmw6CPbVZkUQ4q026rOQKRGxo60n87wA096Pr/hireaclqBPJHnF76hGfHZKjj3/sPNfczRsWtY",
- "LLFXyPV08uIhV3/MDcnTnGDLII+7v/U/83MuLrlvae77qiio3PhjrFpMwb8zjbcKXSk07El2QTVMPqHl",
- "OBbWOMBclKa3YC6nptcX5vJQzAU36T6YS3uge2YuT294wP/6K/7CTv9q7PTUsrvx7NSJcjYhcm6fiGwk",
- "vN5zASuIZmZijiTd9vZzl8N+B7r3lPXkjizmT3vV+v/vc/J87/nDQdCudf0DbMg7ocm3aGL9i57Zccdn",
- "myTU0YyyrEfklv2D0t+IbLMFQ4ValS6JKSKXLBg3IPdvl/7jib2nps9hQ2yEkfckc5FBTx66viMP+Mu+",
- "iv2Fh3zhIdJO/+zhpj8FecFSIGdQlEJSyfIN+ZnXKei3V+uyLBq93T76PZ5mtJFUZLACnjiGlSxEtvHl",
- "B1sDnoM1YvcElfnndg1xaygbNEsd4e/104Z9oBcbcnzUk2Bsty6n/WaDTTsaY0Qn7IK4VTPs8qIBZWwb",
- "mZuFrIQmFguZW9QXxvOF8dxJeBl9eGLyS1Sb8Iac7p089bVYYtWKqO5PPUbn+FOP671sdF+fiekvNsod",
- "MhJ8sOlYXTR/YQlfWMLdWMJ3EDmMeGodk4gQ3W0svX0GgQG9WfclHgx08M2rnEqiYKyZ4hBHdMaJh+AS",
- "D62kRXFldTTKCVwxG8sY2bD71du+sLgvLO4v5LXazWjagsiNNZ1z2BS0rPUbta50Ji5tDcMoV8Ty/jR3",
- "tYAxzrKO2dCC+AGavFnyk6s5kG8wuJRlRozTrAAjUtW8znT22RBN2LMZoXmSecU4ToCsAmexRa9pkJGm",
- "IBXcPmDa8bU5yN5ZnTDGZH+vADmaw42DcTJtOVvcNkZKTN9Z/ur7Rq632NKRKmxEeD8eo36itPX3/JIy",
- "nSyFdNmqiL5+Zw00n7tSXp1fm7IUvS9YayP4MQjsiP86r59kiH7sBq/EvrqIEd+oiU4Lo71wg+s4rw+f",
- "zD5hRV+3903w0sF8jilea6H0fHI9/dwJbAo/fqq35nN9Lbstuv50/X8DAAD//50wp92LvAAA",
+ "hpvTukfN4R1CXw8ZEeQthMiBckSeP3d9lPElW1USFLlcg167O0+CKgVXQMTiX5Bqs+3/4/TH90RI8gMo",
+ "RVdwQtNzAjwV2fAeu0ljN/i/lDAbXqhVSdPz+HWds4JFQP6BXrGiKgivigVIs1/+ftCCSNCV5EMA2RF3",
+ "0FlBr/qTnsmKp7i5zbQtQc2QElNlTjczcrwkBb36+97UgaMIzXNSAs8YXxF9xQeFNDP3bvASKSqejZBh",
+ "tNmw4NZUJaRsySAj9ShbIHHT7IKH8ZvB00hWATh+kEFw6ll2gMPhKkIz5uiaL6SkKwhIZkZ+cpwLv2px",
+ "DrxmcGSxwU+lhAsmKlV3GoARp94uXnOhISklLFmExk4dOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
+ "TLhdmelf0Quq4NWLoQu8+Tpy95eiu+tbd3zUbmOjxB7JyL1ovroDGxebWv1HKH/h3IqtEvtzbyPZ6sxc",
+ "JUuW4zXzL7N/Hg2VQibQQoS/eBRbcaorCQef+FPzF0nIqaY8ozIzvxT2px+qXLNTtjI/5fand2LF0lO2",
+ "GkBmDWtUm8Juhf3HjBdnx/oqqjS8E+K8KsMFpS2tdLEhx0dDm2zHvClhHtaqbKhVnF15TeOmPfRVvZED",
+ "QA7irqSm4TlsJBhoabrEf66WSE90KX83/5RlbnrrchlDraFjd9+ibcDZDA7LMmcpNUj84D6br4YJgNUS",
+ "aNNijhfqwZcAxFKKEqRmdlBalkkuUponSlONI/2HhOXkYPKXeWNcmdvuah5M/s70OsVORh61Mk5Cy/IG",
+ "Y5wYuUZtYRaGQeMnZBOW7aFExLjdRENKzLDgHC4o17NGH2nxg/oAf3QzNfi2oozFd0e/GkQ4sQ0XoKx4",
+ "axs+UiRAPUG0EkQrSpurXCzqHx4flmWDQfx+WJYWHygaAkOpC66Y0uoJLp82Jymc5/hoRr4Lx0Y5W/B8",
+ "Yy4HK2qYu2Hpbi13i9WGI7eGZsRHiuB2CjkzW+PRYGT4+6A41BnWIjdSz05aMY3/4dqGZGZ+H9X5z0Fi",
+ "IW6HiQu1KIc5q8DgL4Hm8rhDOX3CcbacGTns9r0d2ZhR4gRzK1rZup923C14rFF4KWlpAXRf7F3KOGpg",
+ "tpGF9Y7cdCSji8IcnOGA1hCqW5+1nechCgmSQgeGb3KRnv+DqvU9nPmFH6t//HAasgaagSRrqtazSUzK",
+ "CI9XM9qYI2YaovZOFsFUs3qJ97W8HUvLqKbB0hy8cbHEoh77IdMDGdFdfsT/0JyYz+ZsG9Zvh52RM2Rg",
+ "yh5n50HIjCpvFQQ7k2mAJgZBCqu9E6N13wjKN83k8X0atUdvrcHA7ZBbhFl6Yw48XAh5uyPRoXVOGiMn",
+ "oWbUgCNMOzuLTasycfiJGEpsg85AjV9pOyV3h4/hqoWFU03/DVhQZtT7wEJ7oPvGgihKlsM9nNd1lBMZ",
+ "zfX5M3L6j8OX+89+efbyleEapRQrSQuy2GhQ5LFTGIjSmxye9FeGInuV6/jor15401h73Ng4SlQyhYKW",
+ "/aGsyc3ey7YZMe36WGujGVddAzjmWJ6BYS8W7cRakw1oR0yZa79Y3MtmDCEsa2bJiIMkg53EdNPlNdNs",
+ "wiXKjazuQ78CKYWMGH3wiGmRijy5AKmYiNjvT1wL4lp4mavs/m6hJZdUETM32iMrjrdchLL0FUfQmIZC",
+ "7ZIZ7NBnV7zBjRuQSkk3PfTb9UZW5+Ydsy9t5HvzliIlyERfcZLBolq1xPOlFAWhJMOOeHG8Y6u1Du7R",
+ "EynE8t4lqugssSXhByuF5KZPXxZ5LzIwumCl7oG9N4M12DOUE+KMLkSlCSVcZICKY6XijH/A+4huD/TW",
+ "6PAu0WsrWCzAKCkprcxqq5KgL6JHi03HhKaWihJEjRow1tZWdtvKTmc9W7kEmhnlBTgRC2cRdbZaXCRF",
+ "R4r2rNNdOxF1rgVXKUUKShml06oSO0Hz7SxZ6i14QsAR4HoWogRZUnlLYLXQNN8BKLaJgVvLic6M3Id6",
+ "3PTbNrA7ebiNVBq901KBEUrNgctBwxAKR+LkAiSaU/+t++cnue32VeVAsIMTrc5Ygeorp1woSAXPVHSw",
+ "nCqd7Dq2plFL/jMrCE5K7KTiwAMmlHdUaWtUZzxDXcCyG5zH2lbMFMMAD16BZuSf/e3XHzs1fJKrStVX",
+ "oarKUkgNWWwNHK62zPUeruq5xDIYu75vtSCVgl0jD2EpGN8hy67EIojq2vbkvE79xaGFxtwDmygqW0A0",
+ "iNgGyKlvFWA3dPgOAGIUx7onEg5THcqpvczTidKiLM3500nF635DaDq1rQ/1T03bPnFR3fD1TICZXXuY",
+ "HOSXFrPW1b+mRmjHkUlBz83dhCK4tf73YTaHMVGMp5Bso3xzLE9Nq/AI7DikA9qPCyYKZuscjg79Rolu",
+ "kAh27MLQggdUsRMqNUtZiZLE97C5d8GqO0HUakUy0JQZ9SD4YIWsMuxPrDunO+btBK1RUnMf/J7YHFlO",
+ "zhReGG3gz2GD5usTGydwFkQX3IOkGBnVnG7KCQLqvY/mQg6bwBVNdb4x15xew4ZcggSiqkXBtLaBH21B",
+ "UosyCQeIWiS2zOhsQtbH7ndgjJHqFIcKltffiunEii3b4TvrCC4tdDiBqRQiH+Ee6CEjCsEo9wEphdl1",
+ "5uKMfDCKp6QWkE6IQYNgzTwfqRaacQXkf4mKpJSjAFZpqG8EIZHN4vVrZjAXWD2ncxQ0GIIcCrByJX55",
+ "+rS78KdP3Z4zRZZw6YPzTMMuOp4+RS3pRCjdOlz3oKKb43Yc4e1oqjEXhZPhujxlt6HajTxmJ086g9f2",
+ "HXOmlHKEa5Z/ZwbQOZlXY9Ye0sg4Iz2OO8oKEwwdWzfuO3pJ/z06fDN0DLr+xIFvqfk45F4y8lW+uQc+",
+ "bQciEkoJCk9VqJco+1Usw/hNd+zURmko+qq97frLgGDzwYsFPSlT8JxxSArBYRNNWWAcfsCPsd72ZA90",
+ "Rh471LcrNrXg74DVnmcMFd4Vv7jbASmf1H7Ve9j87rgdq04YuYpaKeQloSTNGeqsgistq1R/4hSl4uAs",
+ "R0z9XtYf1pPe+CZxxSyiN7mhPnGqDA5rWTlqnlxCRAv+FsCrS6parUDpjnywBPjEXSvGScWZxrkKs1+J",
+ "3bASJNrbZ7ZlQTdkSXNU634HKcii0u0bEwPslDZalzUxmWmIWH7iVJMcjAb6A+NnVzicj2PzNMNBXwp5",
+ "XmNhFj0PK+CgmEriLonv7Fd0Ybrlr507E7Md7GdrRDHjN1F4Gw2tCP7//fg/Dz4eJv9Fk9/3ktf/bf75",
+ "y4vrJ097Pz67/vvf/0/7p+fXf3/yn/8R2ykPeyz8y0F+fOSkyeMjFBka41IP9gezOBSMJ1EiO1sDKRjH",
+ "KOIObZHHRvDxBPSkMVO5Xf/E9RU3hHRBc5ZRfTty6LK43lm0p6NDNa2N6CiQfq2fYy7dlUhKmp6jR2+y",
+ "YnpdLWapKOZeip6vRC1RzzMKheD4LZvTks1VCen8Yn/HlX4HfkUi7KrDZG8tEPT9gfGQTTRZuihMPHnL",
+ "iluiqJQzUmJEkvfLiOW0Dsu16XgHBGM219Q7Fd2fz16+mkybWMv6u9HU7dfPkTPBsqtYRG0GVzFJzR01",
+ "PGKPFCnpRoGO8yGEPeqCsn6LcNgCjIiv1qx8eJ6jNFvEeaWP83Aa3xU/5jYAw5xENM9unNVHLB8ebi0B",
+ "Mij1Opam05I5sFWzmwAdl0opxQXwKWEzmHU1rmwFyjvDcqBLTBdBE6MYE7dWnwNLaJ4qAqyHCxml1sTo",
+ "B8Vkx/evpxMnRqh7l+zdwDG4unPWtlj/txbk0Xdvz8jcsV71yAZ326GDcNyIJcNFnLWcbYab2eREG93+",
+ "iX/iR7BknJnvB594RjWdL6hiqZpXCuQ3NKc8hdlKkAMfxHZENf3EezLbYP5wED5IymqRs5Sch7J1Q542",
+ "J6w/wqdPHw3H//Tpc89z05eE3VRR/mInSC6ZXotKJy7pJZFwSWUWAV3VSQ84sk1Z2zbrlLixLSt2STVu",
+ "/DjPo2WpusHP/eWXZW6WH5ChcqG9ZsuI0kJ6qcaIOhYa3N/3wl0Mkl76jKlKgSK/FrT8yLj+TJJP1d7e",
+ "cyCtaOBfnfBgaHJTQsvmdavg7K69CxduNSS40pImJV2Bii5fAy1x91HyLtC6mucEu7WikH1ACw7VLMDj",
+ "Y3gDLBw3jqjExZ3aXj57Ob4E/IRbiG2MuNE4LW67X0Fc8q23qxPb3NulSq8Tc7ajq1KGxP3O1EmNKyNk",
+ "eU+SYituDoHL/1wASdeQnkOGqWhQlHozbXX3zkonsnrWwZRN2bRRhZhXhObBBZCqzKgT6infdBM8FGjt",
+ "s1o+wDlszkSTlnSTjI52goEaOqhIqYF0aYg1PLZujO7mO8c3BlWXpY/Tx4BNTxYHNV34PsMH2Yq893CI",
+ "Y0TRCoAfQgSVEURY4h9AwS0Wasa7E+nHlmf0lYW9+SIZnp73E9ekUcOc8zpcDcb12+8FYP63uFRkQY3c",
+ "Llzqsg2iD7hYpegKBiTk0EI7MlS9ZdXFQXbde9GbTiy7F1rvvomCbBsnZs1RSgHzxZAKKjOdkAU/k3UC",
+ "4ApmBCuSOIQtchST6mgJy3SobFnKbYmFIdDiBAySNwKHB6ONkVCyWVPls6ox+dyf5VEywL8xKWRbKuBx",
+ "4G0PMszrRD/Pc7vntKdduoRAnwXoU/9C1XJEGp+R8DEALLYdgqMAlEEOK7tw29gTSpOg0myQgePH5TJn",
+ "HEgSc9xTpUTKbFp8c824OcDIx08JscZkMnqEGBkHYKNzCwcm70V4NvnqJkByl2BD/djoFgv+hnjYpQ3N",
+ "MiKPKA0LZ3wgqM5zAOqiPer7qxNzhMMQxqfEsLkLmhs25zS+ZpBeRhqKrZ38M+defTIkzm6x5duL5UZr",
+ "slfRbVYTykwe6LhAtwXi7aJEbAsU4svZsmpcDd2lY6YeuL6HcPU4yGW7FQAdTb+p+uQ0v50aWvtu7t9k",
+ "DUufNjnaPqo0RvtD9BPdpQH89U0QdfbZSfe6jirpbbdrO/EukJ9irNickb6vo+9RUZADSsRJS4JIzmMe",
+ "MCPYA7LbU98t0NwxvY/yzZPAly9hxZSGxhZtbiXvXHlo2xzFqgJCLIdXp0u5NOv7IETNo23aKnZsLfPB",
+ "V3AhNCRLJpVO0JAfXYJp9K1CjfJb0zQuKLSjBWyBHZbFeQNOew6bJGN5FadXN+/3R2ba97URRlWLc9ig",
+ "OAg0XZMFFoSKxhBtmdqGmW1d8Du74Hf03tY77jSYpmZiacilPcef5Fx0OO82dhAhwBhx9HdtEKVbGCRe",
+ "/EeQ61h6WCA02MOZmYazbabH3mHK/Ng7oy8sFMN3lB0pupZAW966CoY+EqPuMR3UU+qnPAycAVqWLLvq",
+ "GALtqIPqIr2Rtu8T1TtYwN11g+3AQGD0i0XVSlDtmgSNdGsrY/FwbbNRmDlrVw4IGUI4FVO+rmMfUYa0",
+ "sfjYLlydAc2/h83Ppi0uZ3I9ndzNbhjDtRtxB65P6u2N4hk93NaO1HID3BDltCyluKB54qyrQ6QpxYUj",
+ "TWzujbEPzOriNryzt4fvThz419NJmgOVSS0qDK4K25V/mlXZ8gcDB8TXjTMKj5fZrSgZbH6dsx1aZC/X",
+ "4Gp0BdJor5hIY20PjqKz0C7jgTY77a3OMWCXuMVBAGXtH2hsV9Y90HYJ0AvKcm808tAOBMXg4sZVpIly",
+ "hXCAO7sWAg9Rcq/spne646ejoa4dPCmca0sVscIWylNE8K7/2IiQaItCUi0olgKxJoE+c+JVkZjjl6ic",
+ "pXEDI18oQxzcOo5MY4KNB4RRM2LFBvyQvGLBWKaZGqHodoAM5ogi05eVGcLdQrgKxxVnv1VAWAZcm08S",
+ "T2XnoGLtFWdq7l+nRnboz+UGtubpZvi7yBhhGZzujYdAbBcwQjdVD9yjWmX2C63NMeaHwB5/A293OGPv",
+ "StziqXb04ajZxgCu2+6msCBxn/8ZwrDF63ZXQ/bKq6vHMzBHtLoxU8lSit8hruehehwJufeFfxiGePwO",
+ "fBbJXOqymNq60xRpbmYf3O4h6Sa0QrU99ANUjzsf+KSwAok3z1Jut9oWG20FesUJJgzOnNvxG4JxMPcC",
+ "WnN6uaCx8ixGyDAwHTbez5YhWQviO3vcO5s3c7WYZiRwpNZtmU1GK0E22TD9xOdbCgx22tGiQiMZINWG",
+ "MsHUOr9yJSLDVPyScluz1vSzR8n1VmCNX6bXpZCYSqriNu8MUlbQPC45ZIj9duptxlbMVmytFAQlQd1A",
+ "ttS1pSJXVtX6lxvUHC/J3jQoOux2I2MXTLFFDthi37ZYUIWcvDZE1V3M8oDrtcLmz0Y0X1c8k5DptbKI",
+ "VYLUQh2qN7XnZgH6EoCTPWy3/5o8Rp+VYhfwxGDR3c+Tg/3XaHS1f+zFLgBXmnkbN8mQnfzTsZM4HaPT",
+ "zo5hGLcbdRZNjLT19IcZ15bTZLuOOUvY0vG63WepoJyuIB4mUeyAyfbF3URDWgcvPLPFoJWWYkOYjs8P",
+ "mhr+NBDEbdifBYOkoiiYLpxnQ4nC0FNT79NO6oezlaVdqSYPl/+IDsLS+0c6SuTDGk3t/RZbNbpx39MC",
+ "2midEmrzh3PWuO59ATly7KsQYO2qumSVxY2ZyywdxRz05C9JKRnXqFhUepn8jaRrKmlq2N9sCNxk8epF",
+ "pF5Xu0QPvxngD453CQrkRRz1coDsvQzh+pLHXPCkMBwle9IkTQSnctCTGY8W8xy9Gyy4feixQpkZJRkk",
+ "t6pFbjTg1HciPL5lwDuSYr2eG9HjjVf24JRZyTh50Mrs0E8f3jkpoxAyVpOmOe5O4pCgJYMLDFyLb5IZ",
+ "8457IfNRu3AX6P9Yz4MXOQOxzJ/lmCLwTcXy7OcmCaxT8lBSnq6jdv+F6fhLU3y7XrI9x9ESKGvKOeTR",
+ "4eyd+Yu/WyO3/7/E2HkKxke27ZYytMvtLK4BvA2mB8pPaNDLdG4mCLHazoqpoy7zlcgIztPU22iorF+d",
+ "MShX9lsFSscyDPCDjfxA+47RC2y1LAI8Q6l6Rr6zj+esgbTKAaA0y4oqt6nlkK1AOsNjVeaCZlNixjl7",
+ "e/iO2FltH1tC1lbrWqEw115FR68PivOMiyH01WDj8c3jx9kecGlWrTRW51CaFmUsF820OPMNMOEttHWi",
+ "mBdiZ0aOrIStvPxmJzH0sGSyMJJpPZrl8UgT5j9a03SNomuLmwyT/Pgyc54qVfDeQF03uK6vg+fOwO0q",
+ "zdlCc1MijH5xyZR9MwUuoJ3+VueCOtXJp8O1lycrzi2lRHn0tlzl26DdA2cd2t4cGoWsg/gbCi62SuNN",
+ "q+6dYq9owYpuCb/eQwM2BaquB+vfwkopF5ylWC4ieKWlBtm9vzLGVzCiskbXGOWPuDuhkcMVLRxYhxM5",
+ "LA6WEvSM0CGub6wMvppNtdRh/9T40MeaarICrRxng2zq6186ewnjCly9JHyKJ+CTQrb8L8ghoy69pDb9",
+ "3pCMMHZ+QAD+1nx779QjDCo9ZxwFIYc2F79qLRr4PIQ20hPTZCVAufW0EwjVR9Nnhrl0GVx9nvnnJHAM",
+ "674wy7a+uv5Qh95z5zxlpu0b09ZWTmh+boUp2kkPy9JNOlwdNSoP6Cs+iOCIBybxJvAAufX44WhbyG2r",
+ "yx3vU0NocIEOOyjxHu4RRl0ptFMa+YLmlaUobEFsqEs0YZrxCBjvGIfmsZPIBZFGrwTcGDyvA/1UKqm2",
+ "IuAonnYGNEcvXYyhKe1MtHcdqrPBiBJco59jeBubIqcDjKNu0AhulG/qN1YMdQfCxBt83Mkhsl+yFKUq",
+ "J0RlGHbcKWIaYxyGcfsyye0LoH8M+jKR7a4ltSfnJjfRUCbZospWoBOaZbFCc9/gV4JfSVah5ABXkFZ1",
+ "oa6yJCmWYGjXpOhTm5soFVxVxZa5fIM7TpeKmBz9HidQPq66GXxGkP0a1nv09uTD2zeHZ2+P7H1h1HKb",
+ "SmZkbgmFYYhGj1UajOhcKSC/hmj8Ffv92llwHMygeHGEaMMCyp4QMaB+scF/Y8W0hgnI+dRvHNXlHejY",
+ "8cbifXuknnBujl6i2CoZjwm8+u6Ojmbq253Hpv+9HshcrNqAPHCa+zZmHO5RjA2/NfdbmAXeqxBnb8A6",
+ "SRtjqIR/BwG12zq9sM088cbtlYxD231d0n679WS4OP0U7+iBSMoguZ9aMcA6g4biKdPB8F+qXRaOpmQr",
+ "p8SK8rERbDCGrWRvn/uMGsKGAjBs/IX53Os9ToDtqQM49laE+siePkDf+7BBUlLmPJ0Ns+hj1gUY90O+",
+ "x4QeNhvcXYQL28VBYiuJVwgfrrPR1NbAa6AUijVVLWOlw0eGlZxh9e+gTkh/LO/TvYBUG6E+8FVJgJtU",
+ "DTGTBQ8dfK23MaB+1NE3rszGttoa/fqlO5hNLwMgyGKxtR9n4ytJHNYRCegnxacGVsDdWwPt2N7REYbL",
+ "JaSaXezIuPin0VKbaP6p12PtQzZBAgarI9b8A8M3VK8bgLYlRGyFJ6g/dWdwhuKtz2HzSJEWNUSLUU49",
+ "z7tNojJiALlDYkhEqJjHzxrenBOGqZoyEAvew267Q1PyZbAKeJA/dMu5PEkSGuYUbZnyQsQ091Fzma43",
+ "yrTD4KuhpIx+Hd5hQegIyx6r+gWH+gXhQKshx/1yUJcuURrzY2pbs0+ZBuV/88lwdhb7MnVTpxwt+5dU",
+ "Zr5FVFX1WnCy5T7qZVL4GrJdoJf1zKyJh+rHzkcKjGDUW5oLxfgqGQodbIcgha/aoaMVrwMscIxwLUG6",
+ "9wm0f/g70cLHT22DYxsq3Atst0GCGizqZYEbTLX/0NQSwDKJ1D777pzI4QKN3koNdDLI+B+ecxuy39jv",
+ "Pljcl8kboZE7ek12puz7SDimekgMqX5J3G25Owj9Nlov49y+V6Ni6f/coDK0HpdSZFVqL+jwYDQ2hrHF",
+ "NbawkqjCmPZX2ZP9cyw18y5I6TmHzdzK3+ma8qbmT/tYWxHKriFIoe3s9r0aBOK6T76yC1jdC5x/pFI9",
+ "nZRC5MmAufi4X8WgewbOWXoOGTF3h48hGagETh6jlbL2B16uNz5rvyyBQ/ZkRohRy4tSb7xrsF2QszM5",
+ "f6S3zX+Fs2aVLSzi9P3ZJx4Pf8KSH/KO/M0Ps52rKTDM745T2UF2lAm4GqigIOllpC7+2LcaI866bq3y",
+ "hqgsFDEp5ZY5o6POd1/nj5B+UKx7u/YTppT7rM9USGs6QmnJG3S6wssPjUVoXNlw32EHeKFSHBQO99zI",
+ "gfMHxwj9UCMlWMogJbSWv0vP9k+M1nwp2CKFEchmmcoWDBN9oTIwoqg3tW0ijue+CQPzxwXHmhp904dC",
+ "UyLWpQwJx5xLeUHzhzdfYGGBQ8SHe/0mvtBQ/w2RbFGpbhet8I6OmjvQde9van6C5pZ/gtmjqA3YDeXs",
+ "qHXBdl9nDusn0Zzkonm4AYcklzimNRrvvyILF5FaSkiZYp1g/UtfMq9W97CCbPMo0nb9ctc6fxb6DmTs",
+ "FARRkvdN+S0t8H5oIGyO6B/MVAZObpTKY9TXI4sI/mI8KkwN3XFdnLesybacYSeaQ0i4Z6ty4Ma+oVW5",
+ "n/Q6dnm4Drx0KgX9dY6+rVu4jVzUzdrGukT6yB32ZOjFGE9GvPSa6Y6uFIsQrFtIEFTy6/6vRMISC5ML",
+ "8vQpTvD06dQ1/fVZ+7M5zk+fRsW4B3OitN4PdvPGKObnoeg/G+E2EGja2Y+K5dkuwmiFDTePBGBg7C8u",
+ "wPoPeabgF2tP7R9VV+D5Ju7b7iYgYiJrbU0eTBUEBI+IBXbdZtEXnhWklWR6g3nf3vzGfonW0/muttg7",
+ "j0+dKejuPi3Ooa4c0Nj3K+Vv1++EfRG6MDI1Os81vhj19ooWZQ7uoPz90eKv8PxvL7K95/t/Xfxt7+Ve",
+ "Ci9evt7bo69f0P3Xz/fh2d9evtiD/eWr14tn2bMXzxYvnr149fJ1+vzF/uLFq9d/fWT4kAHZAjrxWUaT",
+ "/4lveSSHJ8fJmQG2wQktWf1QnCFjX0acpngSoaAsnxz4n/67P2GzVBTN8P7XiUtimKy1LtXBfH55eTkL",
+ "u8xXaNBLtKjS9dzP03+g6+S4DrC2ibG4ozZ21pACbqojhUP89uHt6Rk5PDmeNQQzOZjszfZm+/j8Tgmc",
+ "lmxyMHmOP+HpWeO+zx2xTQ6+XE8n8zXQHP1f5o8CtGSp/6Qu6WoFcubqqZufLp7NvSgx/+KMmdfbvs3D",
+ "0oTzLy2bb7ajJ1Zvm3/xScnbW7eyfp2tO+gwEorhKe0TtPMvKMoO/j53CQjxj6hSWFqdewdGvGVrNV/0",
+ "Fcuuuz3ci5DzL80Trdf2MOcQc1fYuH0avOg6JUwTuhASs3Z1ujbn16cLMtV+0bcmxuPMEKHp9aZ+rjao",
+ "lHTwsSeN24GIHwlPrCHH5kC1Zmp4ppYVhMV76huh1b65Fz7uJa8/f9mf7u9d/8Xwfffny+fXI/2Ob5rX",
+ "bk9rpj6y4WfMtUMLKp6zZ3t7d3jM6ZCHT+/iJgVvhkVf4K7KpBjSst1WdQYiNTJ25AR1hh947/PFDVe8",
+ "1c7TivKJvM3wDc2IT2XBufcfbu5jjl5fw3+JvV+up5OXD7n6Y25InuYEWwZJ3v2t/4mfc3HJfUsjDFRF",
+ "QeXGH2PVYgr+EWq8cuhKodVPsguqYfIZzcqxmMcB5qI0vQVzOTW9vjKXh2IuuEn3wVzaA90zc3l2wwP+",
+ "51/xV3b6Z2Onp5bdjWenTpSz2ZJz+35kI+H13hJYQTRtExMo6baHobsc9jvQvXeuJ3dkMX/Yk9f/f5+T",
+ "F3svHg6CdiHs72FD3gtNvkX765/0zI47PtskoY5mlGU9IrfsH5T+RmSbLRgq1Kp0GU4RuWTBuAG5f7v0",
+ "X1bsvUN9Dhtiw4+8m5mLDHry0PUdecCf9snsrzzkKw+RdvrnDzf9KcgLlgI5g6IUkkqWb8hPvM5Pv71a",
+ "l2XR0O720e/xNKONpCKDFfDEMaxkIbKNr03YGvAcrIW7J6jMv7QLjFsr2qBZ6gh/r9897AO92JDjo54E",
+ "Y7t1Oe03G2za0RgjOmEXxK2aYZcXDShj28jcLGQlNLFYyNyivjKer4znTsLL6MMTk1+i2oQ35HTv5Kkv",
+ "1BIrZUR1f+oxOscfelzvZaP7+kxMf7Eh8JCR4IPN1eqi+StL+MoS7sYSvoPIYcRT65hEhOhuY+ntMwiM",
+ "9s26z/RgFIRvXuVUEgVjzRSHOKIzTjwEl3hoJS2KK6ujUU7gitlAx8iG3a/e9pXFfWVxfyKv1W5G0xZE",
+ "bqzpnMOmoGWt36h1pTNxaQscRrki1v6nuSsUjEGYdUCHFsQP0CTVkh9dQYJ8g5GnLDNinGYFGJGq5nWm",
+ "s0+VaGKizQjNe80rxnECZBU4i62ITYN0NQWp4PZ1046vzUH23uqEMSb7WwXI0RxuHIyTacvZ4rYxUn/6",
+ "zvJX3zdyvcWWjlRhw8X7wRr1+6Wtv+eXlOlkKaRLZUX09TtroPnc1fnq/NrUrOh9wUIcwY9BYEf813n9",
+ "XkP0YzeyJfbVRYz4Rk3oWhgKhhtcB4F9/Gz2Ccv9ur1vIpsO5nPM/1oLpeeT6+mXTtRT+PFzvTVf6mvZ",
+ "bdH15+v/GwAA//9MwM7Ji70AAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index 1eaa4b4c8..70a6da158 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -626,6 +626,13 @@ type ApplicationResponse Application
// AssetResponse defines model for AssetResponse.
type AssetResponse Asset
+// BlockHashResponse defines model for BlockHashResponse.
+type BlockHashResponse struct {
+
+ // Block header hash.
+ BlockHash string `json:"blockHash"`
+}
+
// BlockResponse defines model for BlockResponse.
type BlockResponse struct {
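The generated `BlockHashResponse` model is a single-field struct keyed by `blockHash`. A small sketch of decoding an endpoint response into it; the struct is redeclared here for self-containment and the hash value is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Redeclared copy of the generated model; real code would import it from
// the generated package instead.
type BlockHashResponse struct {
	// Block header hash.
	BlockHash string `json:"blockHash"`
}

func main() {
	payload := []byte(`{"blockHash":"blk-PLACEHOLDER"}`) // placeholder value

	var resp BlockHashResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.BlockHash)
}
```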
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index 3389b370d..7a5720857 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -38,6 +38,9 @@ type ServerInterface interface {
// Get the block for the given round.
// (GET /v2/blocks/{round})
GetBlock(ctx echo.Context, round uint64, params GetBlockParams) error
+ // Get the block hash for the block on the given round.
+ // (GET /v2/blocks/{round}/hash)
+ GetBlockHash(ctx echo.Context, round uint64) error
// Gets a proof for a given light block header inside a state proof commitment
// (GET /v2/blocks/{round}/lightheader/proof)
GetLightBlockHeaderProof(ctx echo.Context, round uint64) error
@@ -397,6 +400,36 @@ func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error {
return err
}
+// GetBlockHash converts echo context to params.
+func (w *ServerInterfaceWrapper) GetBlockHash(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetBlockHash(ctx, round)
+ return err
+}
+
// GetLightBlockHeaderProof converts echo context to params.
func (w *ServerInterfaceWrapper) GetLightBlockHeaderProof(ctx echo.Context) error {
@@ -837,6 +870,7 @@ func RegisterHandlers(router interface {
router.GET("/v2/applications/:application-id", wrapper.GetApplicationByID, m...)
router.GET("/v2/assets/:asset-id", wrapper.GetAssetByID, m...)
router.GET("/v2/blocks/:round", wrapper.GetBlock, m...)
+ router.GET("/v2/blocks/:round/hash", wrapper.GetBlockHash, m...)
router.GET("/v2/blocks/:round/lightheader/proof", wrapper.GetLightBlockHeaderProof, m...)
router.GET("/v2/blocks/:round/transactions/:txid/proof", wrapper.GetTransactionProof, m...)
router.GET("/v2/ledger/supply", wrapper.GetSupply, m...)
@@ -856,215 +890,217 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PctpLoX8Gd3Srb2uFIfm6sqtRexXIS3WM7LkvJObuRb4whe2ZwRAI8ACjNxNf/",
- "/RYaAAmS4MzoYTlO9MnWEI9Go9HoNz6OUlGUggPXarT/cVRSSQvQIPEvmqai4jphmfkrA5VKVmom+Gjf",
- "fyNKS8bno/GImV9Lqhej8YjTApo2pv94JOFfFZOQjfa1rGA8UukCCmoG1qvStK5HWiZzkbghDuwQR4ej",
- "T2s+0CyToFQfyp94viKMp3mVAdGSckVT80mRC6YXRC+YIq4zYZwIDkTMiF60GpMZgzxTE7/If1UgV8Eq",
- "3eTDS/rUgJhIkUMfzheimDIOHiqogao3hGhBMphhowXVxMxgYPUNtSAKqEwXZCbkBlAtECG8wKtitP/r",
- "SAHPQOJupcDO8b8zCfA7JJrKOejR+3FscTMNMtGsiCztyGFfgqpyrQi2xTXO2TlwYnpNyOtKaTIFQjl5",
- "9/0L8vjx4+dmIQXVGjJHZIOramYP12S7j/ZHGdXgP/dpjeZzISnPkrr9u+9f4PzHboHbtqJKQfywHJgv",
- "5OhwaAG+Y4SEGNcwx31oUb/pETkUzc9TmAkJW+6JbXyjmxLO/0V3JaU6XZSCcR3ZF4Jfif0c5WFB93U8",
- "rAag1b40mJJm0F/3kufvPz4cP9z79G+/HiT/4/58+vjTlst/UY+7AQPRhmklJfB0lcwlUDwtC8r7+Hjn",
- "6EEtRJVnZEHPcfNpgaze9SWmr2Wd5zSvDJ2wVIqDfC4UoY6MMpjRKtfET0wqnhs2ZUZz1E6YIqUU5yyD",
- "bGy478WCpQuSUmWHwHbkguW5ocFKQTZEa/HVrTlMn0KUGLiuhA9c0B8XGc26NmAClsgNkjQXChItNlxP",
- "/sahPCPhhdLcVepylxU5WQDByc0He9ki7rih6TxfEY37mhGqCCX+ahoTNiMrUZEL3JycnWF/txqDtYIY",
- "pOHmtO5Rc3iH0NdDRgR5UyFyoByR589dH2V8xuaVBEUuFqAX7s6ToErBFRAx/Sek2mz7/zn+6Q0RkrwG",
- "pegc3tL0jABPRTa8x27S2A3+TyXMhhdqXtL0LH5d56xgEZBf0yUrqoLwqpiCNPvl7wctiARdST4EkB1x",
- "A50VdNmf9ERWPMXNbaZtCWqGlJgqc7qakKMZKejy272xA0cRmuekBJ4xPid6yQeFNDP3ZvASKSqebSHD",
- "aLNhwa2pSkjZjEFG6lHWQOKm2QQP45eDp5GsAnD8IIPg1LNsAIfDMkIz5uiaL6SkcwhIZkJ+dpwLv2px",
- "BrxmcGS6wk+lhHMmKlV3GoARp14vXnOhISklzFiExo4dOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
- "TLhemelf0VOq4NmToQu8+brl7s9Ed9fX7vhWu42NEnskI/ei+eoObFxsavXfQvkL51ZsntifexvJ5ifm",
- "KpmxHK+Zf5r982ioFDKBFiL8xaPYnFNdSdg/5TvmL5KQY015RmVmfinsT6+rXLNjNjc/5fanV2LO0mM2",
- "H0BmDWtUm8Juhf3HjBdnx3oZVRpeCXFWleGC0pZWOl2Ro8OhTbZjXpYwD2pVNtQqTpZe07hsD72sN3IA",
- "yEHcldQ0PIOVBAMtTWf4z3KG9ERn8nfzT1nmprcuZzHUGjp29y3aBpzN4KAsc5ZSg8R37rP5apgAWC2B",
- "Ni128ULd/xiAWEpRgtTMDkrLMslFSvNEaapxpH+XMBvtj/5ttzGu7NruajeY/JXpdYydjDxqZZyEluUl",
- "xnhr5Bq1hlkYBo2fkE1YtocSEeN2Ew0pMcOCczinXE8afaTFD+oD/KubqcG3FWUsvjv61SDCiW04BWXF",
- "W9vwniIB6gmilSBaUdqc52Ja/3D/oCwbDOL3g7K0+EDREBhKXbBkSqsHuHzanKRwnqPDCfkhHBvlbMHz",
- "lbkcrKhh7oaZu7XcLVYbjtwamhHvKYLbKeTEbI1Hg5Hhb4LiUGdYiNxIPRtpxTT+0bUNycz8vlXnr4PE",
- "QtwOExdqUQ5zVoHBXwLN5X6HcvqE42w5E3LQ7Xs1sjGjxAnmSrSydj/tuGvwWKPwQtLSAui+2LuUcdTA",
- "bCML6zW56ZaMLgpzcIYDWkOornzWNp6HKCRICh0YvstFenYD531qxukfOxyeLIBmIElGNQ3OlTsv8Tsb",
- "O/6I/ZAjgIwI9j/hf2hOzGdD+IYv2mGNws6QfkVgXs+MnmulZzuTaYD6tyCFVW2JUUkvBeWLZvIej7Bo",
- "2YZHvLTaNMEefhFm6Y2t7GAq5NXopUMInDQWQELNqMFxGXd2FptWZeLwE7Ei2AadgRqnS1+YDDHUHT6G",
- "qxYWjjX9DFhQZtSbwEJ7oJvGgihKlsMNnNcFVYv+Ioxa9/gROf7x4OnDR789evrM6CWlFHNJCzJdaVDk",
- "vpOmidKrHB70V4bybJXr+OjPnni7UXvc2DhKVDKFgpb9oaw9yl5athkx7fpYa6MZV10DuM2xPAHDXiza",
- "iTW1GtAOmTJ3YjG9kc0YQljWzJIRB0kGG4npsstrplmFS5QrWd2E8gFSChmxiOAR0yIVeXIOUjERMW6/",
- "dS2Ia+EFkrL7u4WWXFBFzNxorKt4BnISoyy95Aga01CoTReqHfpkyRvcuAGplHTVQ79db2R1bt5t9qWN",
- "fG/7UaQEmeglJxlMq3lLdp1JURBKMuyIF8crNl/o4B59K4WY3bi4EZ0ltiT8gAZ2kps+7qazsgEC/EZk",
- "YBSlSt0Ae28Ga7BnKCfEGZ2KShNKuMgAtapKxRn/gGsOfQLoytDhXaIXVrCYgpHgU1qZ1VYlQUN9jxab",
- "jglNLRUliBo1YMmsTdC2lZ3Oun1yCTQzkj1wIqbOXOgMmbhIil4G7Vmnu3Yiuk4LrlKKFJQyGpmVszeC",
- "5ttZstRr8ISAI8D1LEQJMqPyisBqoWm+AVBsEwO3lhOdjbUP9XbTr9vA7uThNlJplDJLBUYoNQcuBw1D",
- "KNwSJ+cg0db4WffPT3LV7avKgUgAJ1qdsAJ1O065UJAKnqnoYDlVOtl0bE2jlvxnVhCclNhJxYEH7Auv",
- "qNLW4sx4hrqAZTc4jzU8mCmGAR68As3Iv/jbrz92avgkV5Wqr0JVlaWQGrLYGjgs18z1Bpb1XGIWjF3f",
- "t1qQSsGmkYewFIzvkGVXYhFEdW2YcS6Z/uLQfGHugVUUlS0gGkSsA+TYtwqwG3pDBwAximPdEwmHqQ7l",
- "1C7Y8UhpUZbm/Omk4nW/ITQd29YH+uembZ+4qG74eibAzK49TA7yC4tZ6wdfUCO048ikoGfmbkIR3JrG",
- "+zCbw5goxlNI1lG+OZbHplV4BDYc0gHtx0XaBLN1DkeHfqNEN0gEG3ZhaMEDqthbKjVLWYmSxN9gdeOC",
- "VXeCqEmHZKApM+pB8MEKWWXYn1hfR3fMqwlaW0nNffB7YnNkOTlTeGG0gT+DFdp231on+knger8BSTEy",
- "qjndlBME1LvmzIUcNoElTXW+MtecXsCKXIAEoqppwbS2URFtQVKLMgkHiFok1szobELWAe13YBsj1TEO",
- "FSyvvxXjkRVb1sN30hFcWuhwAlMpRL6F7byHjCgEW9nWSSnMrjMXhOMjNTwltYB0QgwaBGvmeU+10Iwr",
- "IP8tKpJSjgJYpaG+EYRENovXr5nBXGD1nM6K3mAIcijAypX4ZWenu/CdHbfnTJEZXPjINdOwi46dHdSS",
- "3gqlW4frBlR0c9yOIrwdTTXmonAyXJenTDbaItzI2+zk287gtX3HnCmlHOGa5V+bAXRO5nKbtYc0sqBq",
- "sXntOO5WVphg6Ni6cd/Rhfh5dPhm6Bh0/YkDx0vzccj3YuSrfHUDfNoORCSUEhSeqlAvUfarmIXBje7Y",
- "qZXSUPRVe9v1twHB5p0XC3pSpuA545AUgsMqGs/POLzGj7He9mQPdEYeO9S3Kza14O+A1Z5nGyq8Ln5x",
- "twNSfls7HW9g87vjdqw6YVgnaqWQl4SSNGeoswqutKxSfcopSsXBWY6Y+r2sP6wnvfBN4opZRG9yQ51y",
- "qgwOa1k5ap6cQUQL/h7Aq0uqms9B6Y58MAM45a4V46TiTONchdmvxG5YCRLt7RPbsqArMqM5qnW/gxRk",
- "Wun2jYnRZ0obrcuamMw0RMxOOdUkB6OBvmb8ZInD+SAvTzMc9IWQZzUWJtHzMAcOiqkk7pL4wX79kaqF",
- "X75p6Jmk62yNKGb8JkRtpaEV3v5/7//X/q8Hyf/Q5Pe95Pl/7L7/+OTTg53ej48+ffvt/2v/9PjTtw/+",
- "699jO+Vhj8VGOciPDp00eXSIIkNjXOrBfmsWh4LxJEpkJwsgBeMYYtuhLXLfCD6egB40Ziq366dcL7kh",
- "pHOas4zqq5FDl8X1zqI9HR2qaW1ER4H0a30fc+nORVLS9Aw9eqM504tqOklFseul6N25qCXq3YxCITh+",
- "y3ZpyXZVCenu+cMNV/o1+BWJsKsOk72yQND3B8bjGdFk6UIU8eTNKm6JolLOSInhOt4vI2bjOmbV5qrt",
- "EwxoXFDvVHR/Pnr6bDRuAhHr70ZTt1/fR84Ey5axcNMMljFJzR01PGL3FCnpSoGO8yGEPeqCsn6LcNgC",
- "jIivFqy8fZ6jNJvGeeWPjjE6jW/Jj7gNwDAnEc2zK2f1EbPbh1tLgAxKvYjlsLRkDmzV7CZAx6VSSnEO",
- "fEzYBCZdjSubg/LOsBzoDHMp0MQotgnqqs+BJTRPFQHWw4VspdbE6AfFZMf3P41HToxQNy7Zu4FjcHXn",
- "rG2x/m8tyL0fXp6QXcd61T0b+WyHDmJVI5YMF47VcrYZbmYz92zo9yk/5YcwY5yZ7/unPKOa7k6pYqna",
- "rRTI72hOeQqTuSD7PsLrkGp6ynsy22BybRBbR8pqmrOUnIWydUOeNmGqP8Lp6a+G45+evu95bvqSsJsq",
- "yl/sBMkF0wtR6cRlhCQSLqjMIqCrOiMAR7b5XOtmHRM3tmXFLuPEjR/nebQsVTcyuL/8sszN8gMyVC7u",
- "1WwZUVpIL9UYUcdCg/v7RriLQdILn05UKVDkQ0HLXxnX70lyWu3tPQbSCpX94IQHQ5OrElo2rytFLnft",
- "XbhwqyHBUkualHQOKrp8DbTE3UfJu0Drap4T7NYK0fUBLThUswCPj+ENsHBcOtwQF3dse/nU3vgS8BNu",
- "IbYx4kbjtLjqfgVBu1ferk7gb2+XKr1IzNmOrkoZEvc7U2f8zY2Q5T1Jis25OQQuOXIKJF1AegYZ5mlB",
- "UerVuNXdOyudyOpZB1M2n9FGFWLSDZoHp0CqMqNOqKd81c1+UKC1T/l4B2ewOhFNzs5l0h3a0fdq6KAi",
- "pQbSpSHW8Ni6Mbqb7xzfGHFclj6IHQM2PVns13Th+wwfZCvy3sAhjhFFKzp8CBFURhBhiX8ABVdYqBnv",
- "WqQfW57RV6b25oukP3reT1yTRg1zzutwNRj0br8XgMnR4kKRKTVyu3B5vTbCPOBilaJzGJCQQwvtlnHc",
- "LasuDrLp3ovedGLWvdB6900UZNs4MWuOUgqYL4ZUUJnphCz4mawTAFcwIViuwyFsmqOYVEdLWKZDZctS",
- "busPDIEWJ2CQvBE4PBhtjISSzYIqn3KMmdn+LG8lA3zGjIl1eXJHgbc9SL+us+A8z+2e05526bLlfIqc",
- "z4sLVcstctyMhI8BYLHtEBwFoAxymNuF28aeUJrsjWaDDBw/zWY540CSmOOeKiVSZnPGm2vGzQFGPt4h",
- "xBqTydYjxMg4ABudWzgweSPCs8nnlwGSu+wT6sdGt1jwN8TDLm1olhF5RGlYOOMDQXWeA1AX7VHfX52Y",
- "IxyGMD4mhs2d09ywOafxNYP00rVQbO0kZzn36oMhcXaNLd9eLJdak72KrrKaUGbyQMcFujUQrxclYlug",
- "EF/OllXjaugu3Wbqget7CFf3g0SvKwHQ0fSbkkhO89uoobXv5v5N1rD0cZPA7KNKY7Q/RD/RXRrAX98E",
- "Uadmve1e11Elve12bWelBfJTjBWbM9L3dfQ9KgpyQIk4aUkQyVnMA2YEe0B2e+y7BZo75r5RvnoQ+PIl",
- "zJnS0Niiza3knSu3bZujmHIvxGx4dbqUM7O+d0LUPNrmdGLH1jJvfQXnQkMyY1LpBA350SWYRt8r1Ci/",
- "N03jgkI7WsBWn2FZnDfgtGewSjKWV3F6dfP+7dBM+6Y2wqhqegYrFAeBpgsyxWpJ0RiiNVPbMLO1C35l",
- "F/yK3th6tzsNpqmZWBpyac/xlZyLDuddxw4iBBgjjv6uDaJ0DYPEi/8Qch1LDwuEBns4M9Nwss702DtM",
- "mR97Y/SFhWL4jrIjRdcSaMtrV8HQR2LUPaaDYkP9lIeBM0DLkmXLjiHQjjqoLtJLafs+i7uDBdxdN9gG",
- "DARGv1hUrQTVTthvpFtbNoqHa5tshZmTdlp9yBDCqZjyRQ/7iDKkjZW5NuHqBGj+N1j9YtrickafxqPr",
- "2Q1juHYjbsD123p7o3hGD7e1I7XcAJdEOS1LKc5pnjjr6hBpSnHuSBObe2PsLbO6uA3v5OXBq7cO/E/j",
- "UZoDlUktKgyuCtuVX82qbG2AgQPii6oZhcfL7FaUDDa/ztkOLbIXC3AFrAJptFdpo7G2B0fRWWhn8UCb",
- "jfZW5xiwS1zjIICy9g80tivrHmi7BOg5Zbk3GnloB4JicHHblWuJcoVwgGu7FgIPUXKj7KZ3uuOno6Gu",
- "DTwpnGtNia3CVpFTRPCu/9iIkGiLQlItKNbJsCaBPnPiVZGY45eonKVxAyOfKkMc3DqOTGOCjQeEUTNi",
- "xQb8kLxiwVimmdpC0e0AGcwRRaavuTKEu6lw5X8rzv5VAWEZcG0+STyVnYOKhUmcqbl/nRrZoT+XG9ia",
- "p5vhryNjhDViujceArFewAjdVD1wD2uV2S+0NseYHwJ7/CW83eGMvStxjafa0YejZhsDuGi7m8JqvX3+",
- "ZwjDVnbbXCrYK6+uWM3AHNHSv0wlMyl+h7ieh+pxJOTeV8VhGOLxO/BJJHOpy2Jq605TwbiZfXC7h6Sb",
- "0ArV9tAPUD3ufOCTwgok3jxLud1qW4mzFegVJ5gwOHPXjt8QjIO5F9Ca04spjZVnMUKGgemg8X62DMla",
- "EN/Z497ZvJkrVDQhgSO1bstsMloJssmG6Sc+X1FgsNNuLSo0kgFSbSgTjK3zK1ciMkzFLyi3BV1NP3uU",
- "XG8F1vhlel0IiamkKm7zziBlBc3jkkOG2G+n3mZszmw500pBUC/TDWTrQFsqcjVHrX+5Qc3RjOyNg4q8",
- "bjcyds4Um+aALR7aFlOqkJPXhqi6i1kecL1Q2PzRFs0XFc8kZHqhLGKVILVQh+pN7bmZgr4A4GQP2z18",
- "Tu6jz0qxc3hgsOju59H+w+dodLV/7MUuAFe3eB03yZCd/N2xkzgdo9POjmEYtxt1Ek2MtMXmhxnXmtNk",
- "u25zlrCl43Wbz1JBOZ1DPEyi2ACT7Yu7iYa0Dl54ZislKy3FijAdnx80NfxpIIjbsD8LBklFUTBdOM+G",
- "EoWhp6YYpp3UD2fLLrtSTR4u/xEdhKX3j3SUyNs1mtr7LbZqdOO+oQW00Tom1OYP56xx3fvqauTIVyHA",
- "2lV1ySqLGzOXWTqKOejJn5FSMq5Rsaj0LPmGpAsqaWrY32QI3GT67EmkXle7RA+/HOC3jncJCuR5HPVy",
- "gOy9DOH6kvtc8KQwHCV70CRNBKdy0JMZjxbzHL0bLLh+6G2FMjNKMkhuVYvcaMCpr0V4fM2A1yTFej2X",
- "osdLr+zWKbOScfKgldmhn9+9clJGIWSsJk1z3J3EIUFLBucYuBbfJDPmNfdC5lvtwnWg/7KeBy9yBmKZ",
- "P8sxReC7iuXZL00SWKfkoaQ8XUTt/lPT8bemMnW9ZHuOoyVQFpRzyKPD2TvzN3+3Rm7/f4pt5ykY37Jt",
- "t5ShXW5ncQ3gbTA9UH5Cg16mczNBiNV2VkwddZnPRUZwnqbeRkNl/eqMQbmyf1WgdCzDAD/YyA+07xi9",
- "wFbLIsAzlKon5Af7sswCSKscAEqzrKhym1oO2RykMzxWZS5oNiZmnJOXB6+IndX2sfVVbbWuOQpz7VV0",
- "9PqgOM92MYS+VGo8vnn7cdYHXJpVK43VOZSmRRnLRTMtTnwDTHgLbZ0o5oXYmZBDK2ErL7/ZSQw9zJgs",
- "jGRaj2Z5PNKE+Y/WNF2g6NriJsMkv32ZOU+VKijGXxfVrevr4LkzcLtKc7bQ3JgIo19cMGUfFIFzaKe/",
- "1bmgTnXy6XDt5cmKc0spUR69Llf5Kmj3wFmHtjeHRiHrIP6Sgout0njZqnvH2CtasKJbwq9Xhd+mQNX1",
- "YP1DUSnlgrMUy0UET5jUILvHSbbxFWxRWaNrjPJH3J3QyOGKFg6sw4kcFgdLCXpG6BDXN1YGX82mWuqw",
- "f2p8BWNBNZmDVo6zQTb29S+dvYRxBa5eEr5TE/BJIVv+F+SQUZdeUpt+L0lGGDs/IAB/b769ceoRBpWe",
- "MY6CkEObi1+1Fg18O0Eb6YlpMheg3HraCYTqV9Nngrl0GSzfT/xbCziGdV+YZVtfXX+oA++5c54y0/aF",
- "aWsrJzQ/t8IU7aQHZekmHa6OGpUH9JIPIjjigUm8CTxAbj1+ONoaclvrcsf71BAanKPDDkq8h3uEUVcK",
- "7ZRGPqd5ZSkKWxAb6hJNmGY8AsYrxqF5CSRyQaTRKwE3Bs/rQD+VSqqtCLgVTzsBmqOXLsbQlHYm2usO",
- "1dlgRAmu0c8xvI1NkdMBxlE3aAQ3ylf1AySGugNh4gW+fOQQ2S9ZilKVE6IyDDvuFDGNMQ7DuH2Z5PYF",
- "0D8GfZnIdteS2pNzmZtoKJNsWmVz0AnNslihue/wK8GvJKtQcoAlpFVdqKssSYolGNo1KfrU5iZKBVdV",
- "sWYu3+Ca06UiJke/wQmUj6tuBp8QZL+G9R6+fPvu5YuDk5eH9r4warlNJTMyt4TCMESjxyoNRnSuFJAP",
- "IRo/YL8PnQXHwQyKF0eINiyg7AkRA+qnK/w3VkxrmICcT/3SUV3egY4dLy3et0fqCefm6CWKzZPtMYFX",
- "3/XR0Ux9tfPY9L/RA5mLeRuQW05zX8eMwz2KseGX5n4Ls8B7FeLsDVgnaWMMlfDvIKB2W6cXtpkn3ri9",
- "knFou69L2q+3ngwXpx/jHT0QSRkk91MrBlhn0FA8ZToY/ku1y8LRlKzllFhRPjaCDcawleztW5hRQ9hQ",
- "AIaNvzCfe723E2B76gCOvRahPrKnD9DffNggKSlzns6GWfQx6wKM+yHf24QeNhvcXYQL28VBYiuJVwgf",
- "rrPR1NbAa6AUijVVLWOlw7cMKznB6t9BnZD+WN6new6pNkJ94KuSAJepGmImCx46uKu3MaB+1NE3rszG",
- "utoa/fqlG5hNLwMgyGKxtR8n21eSOKgjEtBPik8NzIG7twbasb1bRxjOZpBqdr4h4+LvRkttovnHXo+1",
- "D9kECRisjljzr+9eUr1uAFqXELEWnqD+1LXBGYq3PoPVPUVa1BAtRjn2PO8qicqIAeQOiSERoWIeP2t4",
- "c04YpmrKQCx4D7vtDk3Jl8Eq4EH+0BXn8iRJaJhTtGbKcxHT3Leay3S9VKYdBl8NJWX06/AOC0KHWPZY",
- "1S841M/rBloNOeqXg7pwidKYH1Pbmn3KNCj/m0+Gs7PYZ5ubOuVo2b+gMvMtoqqq14KTNfdRL5PC15Dt",
- "Aj2rZ2ZNPFQ/dj5SYASj3tJcKMbnyVDoYDsEKXzyDR2teB1ggWOEawbSvU+g/avYiRY+fmodHOtQ4Z4n",
- "uwoS1GBRLwvcYKr9u6aWAJZJpPZNdOdEDhdo9FZqoJNBxv/wnOuQ/cJ+98HivkzeFhq5o9dkY8q+j4Rj",
- "qofEkOpnxN2Wm4PQr6L1Ms7tezUqlv7PDSpD63EpRVal9oIOD0ZjY9i2uMYaVhJVGNP+Knuyf46lZl4F",
- "KT1nsNq18ne6oLyp+dM+1laEsmsIUmg7u32jBoG47pPP7QLmNwLnl1Sqx6NSiDwZMBcf9asYdM/AGUvP",
- "ICPm7vAxJAOVwMl9tFLW/sCLxcpn7ZclcMgeTAgxanlR6pV3DbYLcnYm5/f0uvmXOGtW2cIiTt+fnPJ4",
- "+BOW/JDX5G9+mPVcTYFhftecyg6yoUzAcqCCgqQXkbr4277VGHHWdWuVN0RloYhJKVfMGd3qfPd1/gjp",
- "B8W612s/YUq5z/pMhbSmI5SWvEGnK7y8bixC25UN9x02gBcqxUHhcM+NHDhfOEbodY2UYCmDlNBa/iY9",
- "2z8xWvOlYIsURiCbZSpbMEz0hcrAiKJe1LaJOJ77JgzMHxcca2r0TR8KTYlYlzIkHHMu5TnNb998gYUF",
- "DhAf7vWb+EJD/TdEskWlulq0wiu61dyBrntzU/O3aG75O5g9itqA3VDOjloXbPd15rB+Es1JLpqHG3BI",
- "coFjWqPxw2dk6iJSSwkpU6wTrH/hS+bV6h5WkG0eRVqvX25a5y9CX4OMnYIgSvKmKb+lBd4PDYTNEf3C",
- "TGXg5EapPEZ9PbKI4C/Go8LU0A3XxVnLmmzLGXaiOYSEG7YqB27sS1qV+0mv2y4P14GXTqWgv86tb+sW",
- "biMXdbO2bV0ifeQOezL0dBtPRrz0mumOrhSLEKxbSBBU8uHhByJhhoXJBdnZwQl2dsau6YdH7c/mOO/s",
- "RMW4W3OitN4PdvPGKOaXoeg/G+E2EGja2Y+K5dkmwmiFDTePBGBg7G8uwPqLPFPwm7Wn9o+qK/B8Gfdt",
- "dxMQMZG1tiYPpgoCgreIBXbdJtEXnhWklWR6hXnf3vzGfovW0/mhttg7j0+dKejuPi3OoK4c0Nj3K+Vv",
- "1x+EfRG6MDI1Os81vhj1ckmLMgd3UL69N/1PePzNk2zv8cP/nH6z93QvhSdPn+/t0edP6MPnjx/Co2+e",
- "PtmDh7Nnz6ePskdPHk2fPHry7Onz9PGTh9Mnz57/5z3DhwzIFtCRzzIa/QPf8kgO3h4lJwbYBie0ZPVD",
- "cYaMfRlxmuJJhIKyfLTvf/rf/oRNUlE0w/tfRy6JYbTQulT7u7sXFxeTsMvuHA16iRZVutj18/Qf6Hp7",
- "VAdY28RY3FEbO2tIATfVkcIBfnv38viEHLw9mjQEM9of7U32Jg/x+Z0SOC3ZaH/0GH/C07PAfd91xDba",
- "//hpPNpdAM3R/2X+KEBLlvpP6oLO5yAnrp66+en80a4XJXY/OmPmJzPqPJYRb0PFg/jgfplx5xjBeBsb",
- "Ct4q26lcFclxXczV2Rp4hhG81j5oWFuNrKOsKdx21DAqn75u6/ns/xp5r2bG5pXsPG1ZRxG4Ss9MEfuw",
- "uiROpXlL07MwShYJ8l8VyFVDMI6VhYVofOFNF0tbqHnZDjxrxKTYI3ixeu04s9nngFJrv0LDibSsIISk",
- "4auGV+4lz99/fPrNp9EWgKCTSwGmKX6gef7Bvj0KS/QU+ER/l8g5jhSZRPF43NipsUOzTWOMnKu/hnXE",
- "6zbteO0PXHD4MLQNDrDoPtA8Nw0Fh9gevMdEOqQEPESP9vZu7AGCOkXBxt/Vo3iSuMJAfQ5jP0VeJ/Pv",
- "EAw8TfbkBhfajty59nK7w/UW/R3NsLYzKG2X8vCrXcoRRz+z4fjE3mifxqOnX/HeHHHDc2hOsGWQpd6/",
- "RX7mZ1xccN/SSDNVUVC5QlklKEAfSqWfBm+r3bBY7u7Hlhcyu9Zd1qsTfnS44Xq7p4aYYr98U6cWr/le",
- "V5tFP5UrOAxLprR6MCE/hL2RMWM2pM01rCRvXuMspThnmWGxLoDDF41oYLunwkTR6GUbmHbv7t3Peu8e",
- "tK0Orfo/MWBaJL4Wpl6YwnUvvn50eucplSs9VRJU/b1C7cTPWs+9o/QNPpC9BYO9w93Q4+ID4k0Aby3p",
- "tKs1f36+a/W34Jpo3QefkSt/5cLaa5obOgmW28mUs0Wx7oS4v4wQV0eu2RfRsA7kOrEOi73vfvQ1zG5A",
- "lHM13LYQ4kJNN+gb1Ni63+EUDya2IFnY5mrswEWhbRTPsLLcnWD2uQWzfknGGBhNob0vJ4whDIumZuNl",
- "niFrPbFwqdqSX6n09RdG1qC4ZSDdLGhdgTf2hCjHiT8bz/xTCk8OaXdi019abLKB32sEp1a9VJclMCw7",
- "gXaJYTbBOZJVoDA42Y4+JkpIFytbSiYk06sxYZxkYM4eegyFxNIwWlY8tYZ+OwVw/O/rg39gnsLrg3+Q",
- "b8neuBbBMHM+Mr2NBG3LQD+A7gc8q+9WB7U4sFYW+sMIGCc1koJUhBD1WviSp4i0gi6/HULZ0voVY+JZ",
- "QZejtZLI+OuRFq8rNHVSMPtUhIV/OEGnv3+erx1/qwgsaarzFaF4/6xsooiqpk290ra4oUWZhANEY9zW",
- "zOhf/4plq182BDhSWAhf2VoP30mntmMLHS6HFJ/a2yyY9JARheBqUt7d7n61u9sXS0kpzJlmWLiquU/8",
- "XdUCsnkDyoE7kN0wIf8tKgx2sU+cQqzoOs6AmSB+TieABlnAOT4wW2NnZ6e78J0dt+dMkRlcIAelHBt2",
- "0bGz8ycQWZd1rWtKuOAJxxc4z4EEEXJ3cusfWm59uvf4q13NMchzlgI5gaIUkkqWr8jPvC4OeD2xvOY5",
- "FQ/KNa7lP720qkaKDsT3a/muu75pphvJsJVlG5gQ6oeSna48bl5aMro8FnXzhXLU2LtOMPDPelXsfox7",
- "jpVJTEgPPDjfrY4Ot5HLvxJH6NbFRSP3WnxvPvcNEI2neXc78TTbMdMne09uD4JwF94ITb5Hc9lnZumf",
- "1XYQJ6uA2Vzao9J4TELW4rLW1zIVc0LHriA8VihfkTqr1PATywjtk1B9rmFm2JZf/IHt8xvNwlG67KL3",
- "ji/c8YVr8YUuQTUcAbMh1e5HdBWE7KB3JDGb60/kYgz8LVIU3uEiyAx0unBZop20mAhb8TWKh3nKupd8",
- "btj/h0BHSmSGxb7whZkts8eDBD50eoGMEN9Pvg6f+cxmWAOgrj/tH6xCdw7zbzjUzze4R26Y8jHnPpnU",
- "7OKloHzRTN5P00G03ITP8A7Bl0Nwj6m9dInw9ni5RfwZotL9UwsJeSOaXGVXfvnPaPb4nDfy517QG8HB",
- "+qWNxGpp8c4FWYsL+OYdIsUXqbCOR/eMflx02MXqCpYH7ta13IaEiXh5zA3CRXNja1EHSEQrWk4hF3yu",
- "/ph39rptjuMlst114dB4ddC/ntD+Ags3cOFrpLlSHorxFOybIP75vIIp5cJ4nux9c3sQalb48kc8zGL6",
- "wlrF57T43qaJFmuf1qV1fLBEtNytYlmnVGRQa2OIv7WCKj7qJcs+beZzgWPukiyO8YDFhZZkWpZA5dV5",
- "22bP/0lnxqPDMCStVW2zroMSAcWg6JJBE/8x2lJxw9xGMSMLqhZkVnELaP18r+UALl5MzMa1X8pc/GK2",
- "T075DlEL+vTho98ePX3m/3z09NmA6mnmcaUW+spnM5D5bIfZRgP980ZYtLWmGnn7t72Vl9uh8Yhly2hp",
- "vaZ8dngunJsH+cQ9RUq6GqzIWW4o/x0O25QCv/36U0qzafw12x/N9ogZqd+YOuLf1TKiLZLkqmbflf0e",
- "iIsImIghtKb+d4319aXA10iBHbKsay7ftuG4CVu1t5hHnuxcKF9UQNVfREB9I3iCOidwL5C00fLlZEEs",
- "/zgOnDj1q31GnlZVWQqp69OtJluJaTDoSQ+ltEHCdUJYSnW6qMrdj/gfrKfyqalcYh+l3LXupXVy2LFt",
- "caOBg3bMphJdu4SPc3mJGXnNUikOsDCouzHUSmkoekFkrutv6547jN4ugueMQ1IIHqv+8xN+fY0fo6VH",
- "MRhpoDOGhQ317T4024K/A1Z7nm2Y23Xx+wfRgq9lhumsVkJZB1+j6QPpvzktrbcfmmPS+nn3Y+tP5wV2",
- "LdWi0pm4CPqikmTP/TZeoqB86NaqTqNcdMpwKpKBMtT19RlyAjzESLv+GqnfEhSJHSzh8hc17cwYzzpE",
- "gtJbKs5Bqlrpl95he2ff+fPYd7bf94BzVWoTs6rUzUoFb0QGdtx2ab9YPg8XGbhyaH1hoJZ34mqzvxma",
- "dh1FJqXVfKFJVRItYipT0zGhqeWf9uEXtemlDNvKV4Q/B0JzCTRbkSkAJ2JqFt1+cYhQhe8Seb3LSXXx",
- "Bx8auEopUlAKsiR8JXwdaHWROdTS9Bo8IeAIcD0LUYLMqLwisFa8WQ+o7gTZ1+DWnlsnwfSh3m76dRvY",
- "nTzcRiqB+FsUzS6iKHNwhpcICrfECdoF2GfePz/JVbevKvEh6siTJfbrCSvwEuaUCwWp4Jkaflho07HF",
- "4tnBWpRZQXBSou/7moEHlIJXVGn3Dnrr/YWgSLeZYs1LSEMFYs3Iv9TlYXtjp4ZfclWp5ol4qydCFlsD",
- "h+Waud7Asp4LjfF+7FoR1YJUCjaNPISlYPz60fjgaSMdmNqxhHZ/cZiUTJ0S2UdlC4gGEesAOfatAuyG",
- "ZuABQPBh2TJUA9w7Gg1cUyFyoNza80RZmvOnk4rX/YbQdGxbH+ifm7Z94nLJnMjXMwEqNBI4yC8sZhVG",
- "fC6oIg4OUtAzZ0eYu5zKPszmMCbot0vWUb45lsemVXgENhzSrsIaHv/WOescjg79RolukAg27MLQgmMq",
- "8h9C9r+sPNt1LnzG0IW2iSAQryYdqXD3gjKdzIR0b93RmQYZ0W871VEp08oZrqwFTwvnjyM4gmMobhz3",
- "ZlpTXsYlpFkQfFK02f1+DLSZ6nshtwq6bMdFUKZJxTXzlW3MeatlzD+eEn0nPd9Jz3fS8530fCc930nP",
- "d9LznfT8uaXnL5NFRZLE82kfvxJLkCejr1LCv7NYr9FGAjHVKQlGRDfneG10tQaa44JYjpdrKdRgmiY+",
- "pqREJVMgqZmOcVLm1EhDsNS+WBCZUgXPnoQv9M8lLdxzSobXmAaPH5HjHw98NNXCRf202973z8Irvcrh",
- "gctCqd878ekowA0GXTYK9dpP6qLerDA/YzkQZXD1ElsfwjnkRpK3gRrE6CJ97egEaP7C4WaDctR60cKM",
- "9mHc0skc2gpaBs/G4VqpIhQj7zoPUsxoroZfpLDjFbSMlVuq+bRVm5A1fCeyVYfcza7t4ga2Cb2JqWKc",
- "ylUkWLJH3j3S0MIwH0dYfb3v041H/vWJtk9mmygs/g6sih7KdVQeDXmrN6w3lA27nHXoJPocUzfOa1QD",
- "uE2og6Fnvyfkne33RW8rghC5I9Zw5j+M47n7ML5jGtjWCFSO9XytSbse8dHTi2d/7B8OxxduHcUtE9No",
- "DjxxvCWZimyVtDhT+4LJmKJKQTHdfMmErBEPU32vmC/rr6Avc0McBotbx25DelgmjrcOMF4bzLod262x",
- "hSM6zhtg/HNz3yEOGYJAHOuJ6c7darKX5GfNNKs7nnbH04LT2LnsGXdx1F0mMrkaT5MrWfFhdvbSvvav",
- "SHhI76sHhmUhRpe6ZbnPYFrN5/aJ+64VGqua4njNQ8K3zeXscrdlcJcjDjt4XQrkulms3eH6jCMIB74v",
- "JJlLUZUPbI1pvkIDZ1FSvvJODaP5F1VucWgz72+Wh9oo5tjLoN64NmyXe+vNb4H1yd2i7d8tWsgFVe6F",
- "SMhIxTGRM5brsOw8WroZ4ydL3nDgtU+a2vVGVufm3Yb7+112kY21I6cEmegltweqdZhcToU9uZO7cjd/",
- "jRvhra3lPsBg+/kBDUPYfDHIgGXhzdApfuqvhjY/fUcvwlKqNyU0bq+tLwDvxFp7jVSKNWKkFDRLqUKj",
- "Bgd9IeTZZ5Yl9fIoYkVGMLHidz87zugkk41CJY67lUjZTkj1Wnk1xXRu8WW5BklIk/l04AqotLBxZ9j9",
- "sxh2v/OHTxFKJL3oHk7rw8EzuQWbohd6yaNcare0L4YMxS+HqeW25Y1GYvSGbwdkBO91WIcy5CWhJM0Z",
- "upsFV1pWqT7lFB1awcL6xbNrN92wKPXCN4n7VCMuTzfUKTdC1YzUbq6oSDWDiAP7ewAvsalqPgelO5x4",
- "BnDKXSvGScWZxrkKlkqR2Iwkc10bjj6xLQu6IjOao0f2d5CCTI0SEVaRRfeQ0izPXXSImYaI2SmnmuRg",
- "mP5rZgQ6M5z3INQRT5buaizEk5Hdc+FJ3Dr7g/2K+b5u+d4LgM4K+9nn6Y2/zKP+CcsGIT86dBXejw6x",
- "aG8TF9KD/daCBQrGkyiRmRvfxVd1aYvcNzKeJ6AHTYSJ2/VTboRpLQgyeqqvRg5dp27vLNrT0aGa1kZ0",
- "fL9+re9j1cXmIjEqI52b3+dML6opPqvvq47tzkVdgWw3o1AIjt+yXVqyXVVCunv+cIN8cA1+RSLs6u7m",
- "/hMlEQV0YE5LvfH4klV37wfu5Rt4UOeP/YrOxoDTuzdr7t6suXvV5O7NmrvdvXuz5u5Fl7sXXf6qL7pM",
- "1kqIrkrgxjcWdM+0SYmE1M5cM/CwWes1hr5XkukJIScLw/+puQPgHCTNSUqVFYy4jXsusDaiqtIUINs/",
- "5UkLElsR0Ux8v/mvVXNPq729x0D2HnT7WLtFwHn7fVFUxU/oaiLfktPR6ag3koRCnIOrzY7NswrDX2yv",
- "jcP+r3rcn2Rv6wq6ssaVBS1LMNeaqmYzljKL8lwYZWAuOtHaXOAXkAY4W6ONMG2fwUF8YpS7i4mhrgBS",
- "TOju3++XeMT7oFtJ61ZrL/55Bex1fKq/YTfHA9eO3WOIdyzjNljGF2caf6KK+HfF7/9gCwodqa3Xba5T",
- "mMc/6x6xO3kZyZqTDW/GESCtJNMrvOFoyX47A/P/94aPK5Dn/vKrZD7aHy20Lvd3d/H9uYVQendkrqbm",
- "m+p8NPcDndsR3OVSSnaOb1e8//T/AwAA//+6jH6WXC0BAA==",
+ "H4sIAAAAAAAC/+x9aXPctrLoX8Gbe6u83OFI3nKPVZW6T7GdHL1jOy5bJ3eJ/GIM2TODIxLgAUBpJn7+",
+ "76/QAEiQBGeoxbKd6JOtIZZGo9HoHR8nqShKwYFrNTn4OCmppAVokPgXTVNRcZ2wzPyVgUolKzUTfHLg",
+ "vxGlJePLyXTCzK8l1avJdMJpAU0b0386kfDPiknIJgdaVjCdqHQFBTUD601pWtcjrZOlSNwQh3aIo+eT",
+ "T1s+0CyToFQfyp95viGMp3mVAdGSckVT80mRc6ZXRK+YIq4zYZwIDkQsiF61GpMFgzxTM7/If1YgN8Eq",
+ "3eTDS/rUgJhIkUMfzmeimDMOHiqogao3hGhBMlhgoxXVxMxgYPUNtSAKqExXZCHkDlAtECG8wKticvDr",
+ "RAHPQOJupcDO8L8LCfA7JJrKJejJ+2lscQsNMtGsiCztyGFfgqpyrQi2xTUu2RlwYnrNyKtKaTIHQjl5",
+ "++Mz8ujRo6dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeZLISnPkrr92x+f4fzv3ALHtqJKQfywHJov",
+ "5Oj50AJ8xwgJMa5hifvQon7TI3Iomp/nsBASRu6JbXytmxLO/0V3JaU6XZWCcR3ZF4Jfif0c5WFB9208",
+ "rAag1b40mJJm0F/3k6fvPz6YPtj/9C+/Hib/4/588ujTyOU/q8fdgYFow7SSEni6SZYSKJ6WFeV9fLx1",
+ "9KBWosozsqJnuPm0QFbv+hLT17LOM5pXhk5YKsVhvhSKUEdGGSxolWviJyYVzw2bMqM5aidMkVKKM5ZB",
+ "NjXc93zF0hVJqbJDYDtyzvLc0GClIBuitfjqthymTyFKDFyXwgcu6OtFRrOuHZiANXKDJM2FgkSLHdeT",
+ "v3Eoz0h4oTR3lbrYZUWOV0BwcvPBXraIO25oOs83ROO+ZoQqQom/mqaELchGVOQcNydnp9jfrcZgrSAG",
+ "abg5rXvUHN4h9PWQEUHeXIgcKEfk+XPXRxlfsGUlQZHzFeiVu/MkqFJwBUTM/wGpNtv+f979/JoISV6B",
+ "UnQJb2h6SoCnIhveYzdp7Ab/hxJmwwu1LGl6Gr+uc1awCMiv6JoVVUF4VcxBmv3y94MWRIKuJB8CyI64",
+ "g84Kuu5PeiwrnuLmNtO2BDVDSkyVOd3MyNGCFHT9/f7UgaMIzXNSAs8YXxK95oNCmpl7N3iJFBXPRsgw",
+ "2mxYcGuqElK2YJCRepQtkLhpdsHD+MXgaSSrABw/yCA49Sw7wOGwjtCMObrmCynpEgKSmZG/O86FX7U4",
+ "BV4zODLf4KdSwhkTlao7DcCIU28Xr7nQkJQSFixCY+8cOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
+ "TLhdmelf0XOq4LvHQxd483Xk7i9Ed9e37vio3cZGiT2SkXvRfHUHNi42tfqPUP7CuRVbJvbn3kay5bG5",
+ "ShYsx2vmH2b/PBoqhUyghQh/8Si25FRXEg5O+H3zF0nIO015RmVmfinsT6+qXLN3bGl+yu1PL8WSpe/Y",
+ "cgCZNaxRbQq7FfYfM16cHet1VGl4KcRpVYYLSlta6XxDjp4PbbId86KEeVirsqFWcbz2msZFe+h1vZED",
+ "QA7irqSm4SlsJBhoabrAf9YLpCe6kL+bf8oyN711uYih1tCxu2/RNuBsBodlmbOUGiS+dZ/NV8MEwGoJ",
+ "tGmxhxfqwccAxFKKEqRmdlBalkkuUponSlONI/2rhMXkYPIve41xZc92V3vB5C9Nr3fYycijVsZJaFle",
+ "YIw3Rq5RW5iFYdD4CdmEZXsoETFuN9GQEjMsOIczyvWs0Uda/KA+wL+6mRp8W1HG4rujXw0inNiGc1BW",
+ "vLUN7ygSoJ4gWgmiFaXNZS7m9Q93D8uywSB+PyxLiw8UDYGh1AVrprS6h8unzUkK5zl6PiM/hWOjnC14",
+ "vjGXgxU1zN2wcLeWu8Vqw5FbQzPiHUVwO4Wcma3xaDAy/HVQHOoMK5EbqWcnrZjGf3VtQzIzv4/q/G2Q",
+ "WIjbYeJCLcphziow+EugudztUE6fcJwtZ0YOu30vRzZmlDjBXIpWtu6nHXcLHmsUnktaWgDdF3uXMo4a",
+ "mG1kYb0iNx3J6KIwB2c4oDWE6tJnbed5iEKCpNCB4YdcpKd/pWp1DWd+7sfqHz+chqyAZiDJiqrVbBKT",
+ "MsLj1Yw25oiZhqi9k3kw1axe4nUtb8fSMqppsDQHb1wssajHfsj0QEZ0l5/xPzQn5rM524b122Fn5BgZ",
+ "mLLH2XkQMqPKWwXBzmQaoIlBkMJq78Ro3ReC8lkzeXyfRu3RC2swcDvkFmGW3pgDD+dCXu5IdGidk8bI",
+ "SagZNeAI087OYtOqTBx+IoYS26AzUONX2k7J3eFjuGph4Z2mnwELyox6HVhoD3TdWBBFyXK4hvO6inIi",
+ "o7k+ekje/fXwyYOHvz188p3hGqUUS0kLMt9oUOSuUxiI0psc7vVXhiJ7lev46N899qax9rixcZSoZAoF",
+ "LftDWZObvZdtM2La9bHWRjOuugZwzLE8BsNeLNqJtSYb0J4zZa79Yn4tmzGEsKyZJSMOkgx2EtNFl9dM",
+ "swmXKDeyug79CqQUMmL0wSOmRSry5AykYiJiv3/jWhDXwstcZfd3Cy05p4qYudEeWXG85SKUpdccQWMa",
+ "CrVLZrBDH695gxs3IJWSbnrot+uNrM7NO2Zf2sj35i1FSpCJXnOSwbxatsTzhRQFoSTDjnhxvGTLlQ7u",
+ "0TdSiMW1S1TRWWJLwg9WCslNn74s8lpkYHTBSl0De28Ga7BnKCfEGZ2LShNKuMgAFcdKxRn/gPcR3R7o",
+ "rdHhXaJXVrCYg1FSUlqZ1VYlQV9EjxabjglNLRUliBo1YKytrey2lZ3OerZyCTQzygtwIubOIupstbhI",
+ "io4U7Vmnu3Yi6lwLrlKKFJQySqdVJXaC5ttZstRb8ISAI8D1LEQJsqDyksBqoWm+A1BsEwO3lhOdGbkP",
+ "9bjpt21gd/JwG6k0eqelAiOUmgOXg4YhFI7EyRlINKd+1v3zk1x2+6pyINjBiVbHrED1lVMuFKSCZyo6",
+ "WE6VTnYdW9OoJf+ZFQQnJXZSceABE8pLqrQ1qjOeoS5g2Q3OY20rZophgAevQDPyL/7264+dGj7JVaXq",
+ "q1BVZSmkhiy2Bg7rLXO9hnU9l1gEY9f3rRakUrBr5CEsBeM7ZNmVWARRXduenNepvzi00Jh7YBNFZQuI",
+ "BhHbAHnnWwXYDR2+A4AYxbHuiYTDVIdyai/zdKK0KEtz/nRS8brfEJre2daH+u9N2z5xUd3w9UyAmV17",
+ "mBzk5xaz1tW/okZox5FJQU/N3YQiuLX+92E2hzFRjKeQbKN8cyzfmVbhEdhxSAe0HxdMFMzWORwd+o0S",
+ "3SAR7NiFoQUPqGJvqNQsZSVKEn+DzbULVt0JolYrkoGmzKgHwQcrZJVhf2LdOd0xLydojZKa++D3xObI",
+ "cnKm8MJoA38KGzRfv7FxAsdBdME1SIqRUc3pppwgoN77aC7ksAmsaarzjbnm9Ao25BwkEFXNC6a1Dfxo",
+ "C5JalEk4QNQisWVGZxOyPna/A2OMVO9wqGB5/a2YTqzYsh2+447g0kKHE5hKIfIR7oEeMqIQjHIfkFKY",
+ "XWcuzsgHo3hKagHphBg0CNbM845qoRlXQP5bVCSlHAWwSkN9IwiJbBavXzODucDqOZ2joMEQ5FCAlSvx",
+ "y/373YXfv+/2nCmygHMfnGcadtFx/z5qSW+E0q3DdQ0qujluRxHejqYac1E4Ga7LU3Ybqt3IY3byTWfw",
+ "2r5jzpRSjnDN8q/MADoncz1m7SGNjDPS47ijrDDB0LF1476jl/Tz6PDN0DHo+hMHvqXm45B7ychX+eYa",
+ "+LQdiEgoJSg8VaFeouxXsQjjN92xUxuloeir9rbrbwOCzVsvFvSkTMFzxiEpBIdNNGWBcXiFH2O97cke",
+ "6Iw8dqhvV2xqwd8Bqz3PGCq8Kn5xtwNSflP7Va9h87vjdqw6YeQqaqWQl4SSNGeoswqutKxSfcIpSsXB",
+ "WY6Y+r2sP6wnPfNN4opZRG9yQ51wqgwOa1k5ap5cQEQL/hHAq0uqWi5B6Y58sAA44a4V46TiTONchdmv",
+ "xG5YCRLt7TPbsqAbsqA5qnW/gxRkXun2jYkBdkobrcuamMw0RCxOONUkB6OBvmL8eI3D+Tg2TzMc9LmQ",
+ "pzUWZtHzsAQOiqkk7pL4yX5FF6Zb/sq5MzHbwX62RhQzfhOFt9HQiuD/v3f/4+DXw+R/aPL7fvL03/be",
+ "f3z86d793o8PP33//f9r//To0/f3/uNfYzvlYY+FfznIj547afLoOYoMjXGpB/uNWRwKxpMokR2vgBSM",
+ "YxRxh7bIXSP4eAK615ip3K6fcL3mhpDOaM4yqi9HDl0W1zuL9nR0qKa1ER0F0q/1fcyluxRJSdNT9OhN",
+ "lkyvqvksFcWel6L3lqKWqPcyCoXg+C3boyXbUyWke2cPdlzpV+BXJMKuOkz20gJB3x8YD9lEk6WLwsST",
+ "t6i4JYpKOSMlRiR5v4xYTOuwXJuOd0AwZnNFvVPR/fnwyXeTaRNrWX83mrr9+j5yJli2jkXUZrCOSWru",
+ "qOERu6NISTcKdJwPIexRF5T1W4TDFmBEfLVi5c3zHKXZPM4rfZyH0/jW/IjbAAxzEtE8u3FWH7G4ebi1",
+ "BMig1KtYmk5L5sBWzW4CdFwqpRRnwKeEzWDW1biyJSjvDMuBLjBdBE2MYkzcWn0OLKF5qgiwHi5klFoT",
+ "ox8Ukx3f/zSdODFCXbtk7waOwdWds7bF+r+1IHd+enFM9hzrVXdscLcdOgjHjVgyXMRZy9lmuJlNTrTR",
+ "7Sf8hD+HBePMfD844RnVdG9OFUvVXqVA/kBzylOYLQU58EFsz6mmJ7wnsw3mDwfhg6Ss5jlLyWkoWzfk",
+ "aXPC+iOcnPxqOP7Jyfue56YvCbupovzFTpCcM70SlU5c0ksi4ZzKLAK6qpMecGSbsrZt1ilxY1tW7JJq",
+ "3PhxnkfLUnWDn/vLL8vcLD8gQ+VCe82WEaWF9FKNEXUsNLi/r4W7GCQ99xlTlQJFPhS0/JVx/Z4kJ9X+",
+ "/iMgrWjgD054MDS5KaFl87pUcHbX3oULtxoSrLWkSUmXoKLL10BL3H2UvAu0ruY5wW6tKGQf0IJDNQvw",
+ "+BjeAAvHhSMqcXHvbC+fvRxfAn7CLcQ2RtxonBaX3a8gLvnS29WJbe7tUqVXiTnb0VUpQ+J+Z+qkxqUR",
+ "srwnSbElN4fA5X/OgaQrSE8hw1Q0KEq9mba6e2elE1k962DKpmzaqELMK0Lz4BxIVWbUCfWUb7oJHgq0",
+ "9lktb+EUNseiSUu6SEZHO8FADR1UpNRAujTEGh5bN0Z3853jG4Oqy9LH6WPApieLg5oufJ/hg2xF3ms4",
+ "xDGiaAXADyGCyggiLPEPoOASCzXjXYn0Y8sz+src3nyRDE/P+4lr0qhhznkdrgbj+u33AjD/W5wrMqdG",
+ "bhcuddkG0QdcrFJ0CQMScmihHRmq3rLq4iC77r3oTScW3Qutd99EQbaNE7PmKKWA+WJIBZWZTsiCn8k6",
+ "AXAFM4IVSRzC5jmKSXW0hGU6VLYs5bbEwhBocQIGyRuBw4PRxkgo2ayo8lnVmHzuz/IoGeAzJoVsSwU8",
+ "CrztQYZ5nejneW73nPa0S5cQ6LMAfepfqFqOSOMzEj4GgMW2Q3AUgDLIYWkXbht7QmkSVJoNMnD8vFjk",
+ "jANJYo57qpRImU2Lb64ZNwcY+fg+IdaYTEaPECPjAGx0buHA5LUIzyZfXgRI7hJsqB8b3WLB3xAPu7Sh",
+ "WUbkEaVh4YwPBNV5DkBdtEd9f3VijnAYwviUGDZ3RnPD5pzG1wzSy0hDsbWTf+bcq/eGxNkttnx7sVxo",
+ "TfYqusxqQpnJAx0X6LZAvF2UiG2BQnw5W1aNq6G7dMzUA9f3EK7uBrlslwKgo+k3VZ+c5rdTQ2vfzf2b",
+ "rGHp0yZH20eVxmh/iH6iuzSAv74Jos4+e9O9rqNKetvt2k68C+SnGCs2Z6Tv6+h7VBTkgBJx0pIgktOY",
+ "B8wI9oDs9p3vFmjumN5H+eZe4MuXsGRKQ2OLNreSd67ctG2OYlUBIRbDq9OlXJj1vRWi5tE2bRU7tpZ5",
+ "4ys4ExqSBZNKJ2jIjy7BNPpRoUb5o2kaFxTa0QK2wA7L4rwBpz2FTZKxvIrTq5v3b8/NtK9rI4yq5qew",
+ "QXEQaLoicywIFY0h2jK1DTPbuuCXdsEv6bWtd9xpME3NxNKQS3uOb+RcdDjvNnYQIcAYcfR3bRClWxgk",
+ "XvzPIdex9LBAaLCHMzMNZ9tMj73DlPmxd0ZfWCiG7yg7UnQtgba8dRUMfSRG3WM6qKfUT3kYOAO0LFm2",
+ "7hgC7aiD6iK9kLbvE9U7WMDddYPtwEBg9ItF1UpQ7ZoEjXRrK2PxcG2zUZg5blcOCBlCOBVTvq5jH1GG",
+ "tLH42C5cHQPN/wabX0xbXM7k03RyNbthDNduxB24flNvbxTP6OG2dqSWG+CCKKdlKcUZzRNnXR0iTSnO",
+ "HGlic2+MvWFWF7fhHb84fPnGgf9pOklzoDKpRYXBVWG78ptZlS1/MHBAfN04o/B4md2KksHm1znboUX2",
+ "fAWuRlcgjfaKiTTW9uAoOgvtIh5os9Pe6hwDdolbHARQ1v6BxnZl3QNtlwA9oyz3RiMP7UBQDC5uXEWa",
+ "KFcIB7iyayHwECXXym56pzt+Ohrq2sGTwrm2VBErbKE8RQTv+o+NCIm2KCTVgmIpEGsS6DMnXhWJOX6J",
+ "ylkaNzDyuTLEwa3jyDQm2HhAGDUjVmzAD8krFoxlmqkRim4HyGCOKDJ9WZkh3M2Fq3BccfbPCgjLgGvz",
+ "SeKp7BxUrL3iTM3969TIDv253MDWPN0MfxUZIyyD073xEIjtAkbopuqB+7xWmf1Ca3OM+SGwx1/A2x3O",
+ "2LsSt3iqHX04arYxgKu2uyksSNznf4YwbPG63dWQvfLq6vEMzBGtbsxUspDid4jreageR0LufeEfhiEe",
+ "vwOfRTKXuiymtu40RZqb2Qe3e0i6Ca1QbQ/9ANXjzgc+KaxA4s2zlNuttsVGW4FecYIJgzP37PgNwTiY",
+ "ewGtOT2f01h5FiNkGJgOG+9ny5CsBfGdPe6dzZu5WkwzEjhS67bMJqOVIJtsmH7i8yUFBjvtaFGhkQyQ",
+ "akOZYGqdX7kSkWEqfk65rVlr+tmj5HorsMYv0+tcSEwlVXGbdwYpK2gelxwyxH479TZjS2YrtlYKgpKg",
+ "biBb6tpSkSurav3LDWqOFmR/GhQddruRsTOm2DwHbPHAtphThZy8NkTVXczygOuVwuYPRzRfVTyTkOmV",
+ "sohVgtRCHao3tedmDvocgJN9bPfgKbmLPivFzuCewaK7nycHD56i0dX+sR+7AFxp5m3cJEN28p+OncTp",
+ "GJ12dgzDuN2os2hipK2nP8y4tpwm23XMWcKWjtftPksF5XQJ8TCJYgdMti/uJhrSOnjhmS0GrbQUG8J0",
+ "fH7Q1PCngSBuw/4sGCQVRcF04TwbShSGnpp6n3ZSP5ytLO1KNXm4/Ed0EJbeP9JRIm/WaGrvt9iq0Y37",
+ "mhbQRuuUUJs/nLPGde8LyJEjX4UAa1fVJassbsxcZuko5qAnf0FKybhGxaLSi+QvJF1RSVPD/mZD4Cbz",
+ "7x5H6nW1S/TwiwF+43iXoECexVEvB8jeyxCuL7nLBU8Kw1Gye03SRHAqBz2Z8Wgxz9G7wYLbhx4rlJlR",
+ "kkFyq1rkRgNOfSXC41sGvCIp1uu5ED1eeGU3TpmVjJMHrcwO/f3tSydlFELGatI0x91JHBK0ZHCGgWvx",
+ "TTJjXnEvZD5qF64C/Zf1PHiRMxDL/FmOKQI/VCzPfmmSwDolDyXl6Spq95+bjr81xbfrJdtzHC2BsqKc",
+ "Qx4dzt6Zv/m7NXL7/0OMnadgfGTbbilDu9zO4hrA22B6oPyEBr1M52aCEKvtrJg66jJfiozgPE29jYbK",
+ "+tUZg3Jl/6xA6ViGAX6wkR9o3zF6ga2WRYBnKFXPyE/28ZwVkFY5AJRmWVHlNrUcsiVIZ3isylzQbErM",
+ "OMcvDl8SO6vtY0vI2mpdSxTm2qvo6PVBcZ5xMYS+Gmw8vnn8ONsDLs2qlcbqHErToozlopkWx74BJryF",
+ "tk4U80LszMhzK2ErL7/ZSQw9LJgsjGRaj2Z5PNKE+Y/WNF2h6NriJsMkP77MnKdKFbw3UNcNruvr4Lkz",
+ "cLtKc7bQ3JQIo1+cM2XfTIEzaKe/1bmgTnXy6XDt5cmKc0spUR69LVf5Mmj3wFmHtjeHRiHrIP6Cgout",
+ "0njRqnvvsFe0YEW3hF/voQGbAlXXg/VvYaWUC85SLBcRvNJSg+zeXxnjKxhRWaNrjPJH3J3QyOGKFg6s",
+ "w4kcFgdLCXpG6BDXN1YGX82mWuqwf2p86GNFNVmCVo6zQTb19S+dvYRxBa5eEj7FE/BJIVv+F+SQUZde",
+ "Upt+L0hGGDs/IAD/aL69duoRBpWeMo6CkEObi1+1Fg18HkIb6YlpshSg3HraCYTqV9Nnhrl0Gazfz/xz",
+ "EjiGdV+YZVtfXX+oQ++5c54y0/aZaWsrJzQ/t8IU7aSHZekmHa6OGpUH9JoPIjjigUm8CTxAbj1+ONoW",
+ "ctvqcsf71BAanKHDDkq8h3uEUVcK7ZRGPqN5ZSkKWxAb6hJNmGY8AsZLxqF57CRyQaTRKwE3Bs/rQD+V",
+ "SqqtCDiKpx0DzdFLF2NoSjsT7VWH6mwwogTX6OcY3samyOkA46gbNIIb5Zv6jRVD3YEw8Qwfd3KI7Jcs",
+ "RanKCVEZhh13ipjGGIdh3L5McvsC6B+Dvkxku2tJ7cm5yE00lEk2r7Il6IRmWazQ3A/4leBXklUoOcAa",
+ "0qou1FWWJMUSDO2aFH1qcxOlgquq2DKXb3DF6VIRk6Nf4wTKx1U3g88Isl/Dep+/ePP2xbPD4xfP7X1h",
+ "1HKbSmZkbgmFYYhGj1UajOhcKSAfQjR+wH4fOguOgxkUL44QbVhA2RMiBtTPN/hvrJjWMAE5n/qFo7q8",
+ "Ax07Xli8b4/UE87N0UsUWybjMYFX39XR0Ux9ufPY9L/WA5mLZRuQG05z38aMwz2KseEX5n4Ls8B7FeLs",
+ "DVgnaWMMlfDvIKB2W6cXtpkn3ri9knFou69L2m+3ngwXp5/iHT0QSRkk91MrBlhn0FA8ZToY/ku1y8LR",
+ "lGzllFhRPjaCDcawleztc59RQ9hQAIaNvzCfe73HCbA9dQDH3opQH9nTB+hvPmyQlJQ5T2fDLPqYdQHG",
+ "/ZDvMaGHzQZ3F+HCdnGQ2EriFcKH62w0tTXwGiiFYk1Vy1jp8JFhJcdY/TuoE9Ify/t0zyDVRqgPfFUS",
+ "4CJVQ8xkwUMHt/U2BtSPOvrGldnYVlujX790B7PpZQAEWSy29uNsfCWJwzoiAf2k+NTAErh7a6Ad2zs6",
+ "wnCxgFSzsx0ZF/9ptNQmmn/q9Vj7kE2QgMHqiDX/wPAF1esGoG0JEVvhCepPXRmcoXjrU9jcUaRFDdFi",
+ "lFPP8y6TqIwYQO6QGBIRKubxs4Y354RhqqYMxIL3sNvu0JR8GawCHuQPXXIuT5KEhjlFW6Y8EzHNfdRc",
+ "puuFMu0w+GooKaNfh3dYEHqOZY9V/YJD/YJwoNWQo345qHOXKI35MbWt2adMg/K/+WQ4O4t9mbqpU46W",
+ "/XMqM98iqqp6LTjZch/1Mil8Ddku0It6ZtbEQ/Vj5yMFRjDqLc2FYnyZDIUOtkOQwlft0NGK1wEWOEa4",
+ "FiDd+wTaP/ydaOHjp7bBsQ0V7gW2yyBBDRb1ssANptq/bWoJYJlEap99d07kcIFGb6UGOhlk/A/PuQ3Z",
+ "z+x3Hyzuy+SN0MgdvSY7U/Z9JBxTPSSGVL8g7rbcHYR+Ga2XcW7fq1Gx9H9uUBlaj0spsiq1F3R4MBob",
+ "w9jiGltYSVRhTPur7Mn+OZaaeRmk9JzCZs/K3+mK8qbmT/tYWxHKriFIoe3s9rUaBOK6T760C1heC5xf",
+ "UqmeTkoh8mTAXHzUr2LQPQOnLD2FjJi7w8eQDFQCJ3fRSln7A89XG5+1X5bAIbs3I8So5UWpN9412C7I",
+ "2Zmc39Hb5l/jrFllC4s4fX92wuPhT1jyQ16Rv/lhtnM1BYb5XXEqO8iOMgHrgQoKkp5H6uKPfasx4qzr",
+ "1ipviMpCEZNSLpkzOup893X+COkHxbq3az9hSrnP+kyFtKYjlJa8QacrvLxqLELjyob7DjvAC5XioHC4",
+ "50YOnC8cI/SqRkqwlEFKaC1/l57tnxit+VKwRQojkM0ylS0YJvpCZWBEUc9q20Qcz30TBuaPC441Nfqm",
+ "D4WmRKxLGRKOOZfyjOY3b77AwgKHiA/3+k18oaH+GyLZolJdLlrhJR01d6DrXt/U/A2aW/4TzB5FbcBu",
+ "KGdHrQu2+zpzWD+J5iQXzcMNOCQ5xzGt0fjBd2TuIlJLCSlTrBOsf+5L5tXqHlaQbR5F2q5f7lrnL0Jf",
+ "gYydgiBK8ropv6UF3g8NhM0R/cJMZeDkRqk8Rn09sojgL8ajwtTQHdfFacuabMsZdqI5hIRrtioHbuwL",
+ "WpX7Sa9jl4frwEunUtBf5+jbuoXbyEXdrG2sS6SP3GFPhp6P8WTES6+Z7uhKsQjBuoUEQSUfHnwgEhZY",
+ "mFyQ+/dxgvv3p67ph4ftz+Y4378fFeNuzInSej/YzRujmF+Gov9shNtAoGlnPyqWZ7sIoxU23DwSgIGx",
+ "v7kA6y/yTMFv1p7aP6quwPNF3LfdTUDERNbamjyYKggIHhEL7LrNoi88K0gryfQG8769+Y39Fq2n81Nt",
+ "sXcenzpT0N19WpxCXTmgse9Xyt+uPwn7InRhZGp0nmt8MerFmhZlDu6gfH9n/u/w6C+Ps/1HD/59/pf9",
+ "J/spPH7ydH+fPn1MHzx99AAe/uXJ4314sPju6fxh9vDxw/njh4+/e/I0ffT4wfzxd0///Y7hQwZkC+jE",
+ "ZxlN/gvf8kgO3xwlxwbYBie0ZPVDcYaMfRlxmuJJhIKyfHLgf/rf/oTNUlE0w/tfJy6JYbLSulQHe3vn",
+ "5+ezsMveEg16iRZVutrz8/Qf6HpzVAdY28RY3FEbO2tIATfVkcIhfnv74t0xOXxzNGsIZnIw2Z/tzx7g",
+ "8zslcFqyycHkEf6Ep2eF+77niG1y8PHTdLK3Apqj/8v8UYCWLPWf1DldLkHOXD1189PZwz0vSux9dMbM",
+ "T2bUZSwj3oaKB/HB/TLjzjGC8TY2FLxVtlO5KpLTupirszXwDCN4rX3QsLYaWUdZU7jtqGFUPn3d1vM5",
+ "+DXyXs2CLSvZedqyjiJwlZ6ZIvZhdUmcSvOGpqdhlCwS5D8rkJuGYBwrCwvR+MKbLpa2UMuyHXjWiEmx",
+ "R/Bi9dpxZrPPAaXWfoWGE2lZQQhJw1cNr9xPnr7/+OQvnyYjAEEnlwJMU/xA8/yDfXsU1ugp8In+LpFz",
+ "GikyieLxtLFTY4dmm6YYOVd/DeuI123a8dofuODwYWgbHGDRfaB5bhoKDrE9eI+JdEgJeIge7u9f2wME",
+ "dYqCjb+rR/EkcYmB+hzGfoq8TubfIRh4muzxNS60Hblz5eV2h+st+geaYW1nUNou5cE3u5Qjjn5mw/GJ",
+ "vdE+TSdPvuG9OeKG59CcYMsgS71/i/ydn3Jxzn1LI81URUHlBmWVoAB9KJV+Gryt9sJiuXsfW17I7Ep3",
+ "Wa9O+NHzHdfbHTXEFPvlmzq1eM33utos+qlcwWFYM6XVvRn5KeyNjBmzIW2uYSV58xpnKcUZywyLdQEc",
+ "vmhEA9sdFSaKRi/bwLR7e+9+1nv3sG11aNX/iQHTIvGtMPXCFK568fWj0ztPqVzqqZKg6u8laid+1nru",
+ "HaVv8IHsEQz2FndDj4sPiDcBvLWk067W/Pn5rtXfgmuidR98Rq78jQtrr2hu6CRYbidTzhbFuhXi/jRC",
+ "XB25Zl9EwzqQ28Q6LPa+99HXMLsGUc7VcBshxIWabtA3qLF1t8Mp7s1sQbKwzeXYgYtC2ymeYWW5W8Hs",
+ "cwtm/ZKMMTCaQntfThhDGFZNzcaLPEPWemLhQrUlv1Hp60+MrEFxy0C6W9C6BG/sCVGOE382nvmHFJ4c",
+ "0m7Fpj+12GQDv7cITq16qS5LYFh2Au0Sw2yCcySrQGFwsh19SpSQLla2lExIpjdTwjjJwJw99BgKiaVh",
+ "tKx4ag39dgrg+N9Xh/+FeQqvDv+LfE/2p7UIhpnzkeltJGhbBvoJdD/gWf2wOazFga2y0FcjYBzXSApS",
+ "EULUa+FLniLSCrr+fghla+tXjIlnBV1Ptkoi029HWryq0NRJwexTERb+4QSd/v55vnb8rSKwpqnON4Ti",
+ "/bOxiSKqmjf1StvihhZlEg4QjXHbMqN//SuWrX7REOBIYSF8ZWs7fMed2o4tdLgcUnxqb7dg0kNGFILL",
+ "SXm3u/vN7m5fLCWlMGeaYeGq5j7xd1ULyOYNKAfuQHbDjPy3qDDYxT5xCrGi6zgDZoL4OZ0AGmQB5/jA",
+ "bI2d+/e7C79/3+05U2QB58hBKceGXXTcv/8HEFnXda1rSrjgCccXOM+ABBFyt3LrVy23Ptl/9M2u5h3I",
+ "M5YCOYaiFJJKlm/I33ldHPBqYnnNcyoelGvcyn96aVWNFB2I71fyXXd900w3kmEryzYwIdQPJTtdedq8",
+ "tGR0eSzq5gvlqKl3nWDgn/Wq2P2Y9hwrs5iQHnhwftgcPR8jl38jjtDRxUUj91p8bz73DRCNp3l7M/E0",
+ "45jp4/3HNwdBuAuvhSY/ornsM7P0z2o7iJNVwGwu7FFpPCYha3FZ61uZijmhU1cQHiuUb0idVWr4iWWE",
+ "9kmoPtcwM4zlF1+xfX6nWThKl1303vKFW75wJb7QJaiGI2A2pNr7iK6CkB30jiRmc/2BXIyBv0WKwjtc",
+ "BFmATlcuS7STFhNhK75G8TBP2faSzzX7/xDoSInMsNgXvjAzMns8SOBDpxfICPH97Ovwmc9sgTUA6vrT",
+ "/sEqdOcw/4ZD/XyDe+SGKR9z7pNJzS5eCMpnzeT9NB1Ey3X4DG8RfDEE95jaC5cIb4+XW8QfISrdP7WQ",
+ "kNeiyVV25Zf/iGaPz3kjf+4FvRYcrF/aSKyWFm9dkLW4gG/eIVJ8kQrreHTP6MdFhz3/UMtW+eGv9pmU",
+ "rTLEmIvZTPZN3s5/jT4V17pAzNpmO5Prm9HG8F3T0NYSbVcR/YKKxRdhlV+htvElmNHNcA88pJ6FuBuf",
+ "j+YnWK3F0uleXRtyiLnEy+2OZjRa1AFX0Qq5c8gFX6qvk8ts2/g4XiIEUBcijlcb/vMdy2dYCIYLX3PR",
+ "lQZSjKdg3xjyz3EWTCkXFvh4/y83B6FmhS+nxsOsyC/MOD6nB+kmXT5YS7ku1eWDr6LlsxXLOqVng9o9",
+ "Q/ytFaT1Ua9Z9mk3nwsc/RdkcYwHLC70TNGyBCovz9t2RxIdd2Y8eh6GuLaq99Z1lSKgGBRdMAjr3yYj",
+ "DUGYKy0W7sqquAW0fg7ccgAXfyoW09rPbe5usTggJ/w+USv65MHD3x4++c7/+fDJdwOmLDOPK93SN2Y1",
+ "A5nPdpgxFq0/bsRWW5CukXdw01t5sR2aTli2jpbqbMrxh+fCuY2RT9xRpKSbwQq/5Y7nBMJhm6cFbr6e",
+ "ndJsHn8d22sk9Zt1R/yHWue0RddcFf7bZwQG4qwCJmIIrXlPoMb69qcFtkiBHbKsa7jftL7YhMHbW8wj",
+ "T3YulC8qoOovpTcmqDYC9wJJGy1fThbEcrLTwClcvwJq5GlVlaWQuj7dajZKTIPByJxQShskXCeEpVSn",
+ "q6rc+4j/wfpMn5pKSPaR2z3rrt4mh72zLa41ENmO2VS2bJcEcy50sSCvWCrFIRYadjeG2igNRS8o1XX9",
+ "bdvzqdHbRfCccUgKwWPVxH7Gr6/wY7SUMQY3DnTGMNOhvt2Hq1vwd8BqzzOGuV0Vv1+JFnwlw0xntRLK",
+ "OpkDTR9I/81pab0l0xyT1s97H1t/uqgS11KtKp2J86AvKkn23I/xOgfliMebjWvlolPWV5EMlKGub8+Q",
+ "E+AhRtr110g9qKDo9GBJqD+paWfBeNYhEpTeUnEGUtVKv/QBILf2nT+OfWf8vgecq1K7mFWlrlcqeC0y",
+ "sOO2S4XG8gO5yMCVV+wLA7W8E1eb/c3QtOsoMimtlitNqpJoEVOZmo4JTS3/tA9JqV0v79hW/oWJMyA0",
+ "l0CzDZkDcCLmZtHtF8wIVfjOmde7nFQXf0CmgauUIgWlIEvqJ8l3gFYXrUQtTW/BEwKOANezECXIgspL",
+ "AmvFm+2A6k7STg1uHQniJJg+1OOm37aB3cnDbaQSiL9F0ewiijIHZ3iJoHAkTtAuwD7z/vlJLrt9VYkP",
+ "20eeQLJfj1mBlzCnXChIBc/U8ENlu44tFuMP1qLMCoKTEn0v3Aw8oBS8pEq/dQbj8D2XoOi/mWLLy2pD",
+ "BafNyL/U5aZ7Y6eGX3JVqbomtdMTIYutgcN6y1yvYV3PhcZ4P3atiGpBKgW7Rh7CUjC+Q5YKn0rTgakd",
+ "S/L3F4dFDqhTIvuobAHRIGIbIO98qwC7oRl4ABB8qLoM1QD3Lk8D11yIHCi39jxRlub86aTidb8hNL2z",
+ "rQ/135u2feJyyeHI1zMBKjQSOMjPLWYVRpCvqCIODlLQU2dHWLoc7T7M5jAm6LdLtlG+OZbvTKvwCOw4",
+ "pF2FNTz+rXPWORwd+o0S3SAR7NiFoQXHVOSvQva/qDzbdS58xmCGtokgEK9mHalw75wynSyEdG9n0oUG",
+ "GdFvO9WWKdPKGa6sBU8L548jOIJjKG4c9wZjU67KJbhaEHyRBbP7/ZwKM9WPQo4K4m7HRVCmScU185Wy",
+ "zHmrZcyvT4m+lZ5vpedb6flWer6Vnm+l51vp+VZ6/tzS85cKnk48n/bxK7GCG2TyTUr4txbrLdpIIKY6",
+ "JcGI6OYcb83W0EBzXBDL8XIthRpM+8bH2ZSoZAokNdMxTsqcGmkI1toXHyNzquC7x/Xjpq5khHuezfAa",
+ "0+DRQ/Lur4c+mmrlon7abe+6om1E6U0O91xWW/1+kk9vA24w6LLbqNd+Uhf1ZoX5BcuBKIOrF9j6OZxB",
+ "biR5G6hBjC7S146OgebPHG52KEetF3LMaB+mLZ3Moa2gZfAMJa6VKkIx8q7zwM2C5mr4hRs7XkHLWPm2",
+ "mk9btQlZww8i23TI3ezaHm5gm9CbmCrGqdxEgiV75N0jDS0M83GE1df7Pl175F+faPtktovC4u9Kq+ih",
+ "3Ebl0ZC3esN6Q9mwy0WHTqLPu3XjvCY1gGNCHQw9+z0hb22/L5vqgxC5I9Zw5q/G8dxuWTMNbGsEKsd6",
+ "vtW8HI/46OnFsz81hJ1VKeCL2Y7i1olptASeON6SzEW2SVqcqX3BZExRpaCY775kQtaIh6m+V8yX7VfQ",
+ "l7khngeL28ZuQ3pYJ463DjBeG8w6ju3W2MIRHecNMP65ue8QhwxBII71xHTnbnXqC/KzZprNLU+75WnB",
+ "aexc9oy7OOouE5ldjqfJjaz4MDt7sYa0MvOGh/SuumdYFmJ0rVuW+wzm1XJpBPa+FRqrJON4zcPkN83l",
+ "7HLHMriLEYcdvC4tdNWs+O5wfcYRhAPfFZIspajKe7ZmPd+ggbMoKd94p4bR/Isqtzi0lTyul4faKObY",
+ "S8PeuDZsl3vjzW+B9cndou3fLVrIOVXuxVnISMUxkTOW67DuPIK8G+PHa95w4K1PJNv1Rlbn5h3D/f0u",
+ "u8jG2pFTgkz0mtsD1TpMLqfCntzZbfmsP8eN8Ma+DTHAYPv5AQ1D2H0xyIBl4c3QKabsr4Y2P31Lz8PS",
+ "zNclNI7X1leAd2KtvUYqTxsxUgqapVShUYODPhfy9DPLknp9FLEiI5j4gkA/O25cWQwcd5RI2U5I9Vp5",
+ "Ncd0bsG/dG2MJvPp0BVkamHj1rD7RzHs/uAPnyKUSHrePZzWh4NncgSboud6zaNcaq+0LxANxS+HqeW2",
+ "5bVGYvSGbwdkBO//WIcy5CWhJM0ZupsFV1pWqT7hFB1awcL6xfhrN92wKPXMN4n7VCMuTzfUCTdC1YLU",
+ "bq6oSLWAiAP7RwAvsalquQSlO5x4AXDCXSvGScWZxrkKlkqR2Iwkc10bjj6zLQu6IQuao0f2d5CCzI0S",
+ "EValRveQ0izPXXSImYaIxQmnmuRgmP4rZgQ6M5z3INQRT5buaizEk5GXwEExlcStsz/Zr5jv65bvvQDo",
+ "rLCffZ7eTSf6ethZNgj50XP3YsTRcywC3sSF9GC/sWCBgvEkSmTmxnfxVV3aIneNjOcJ6F4TYeJ2/YQb",
+ "YVoLgoye6suRQ9ep2zuL9nR0qKa1ER3fr1/r+1i1wqVIjMpIl+b3JdOraj5LRbHnqxjuLUVd0XAvo1AI",
+ "jt+yPVqyPVVCunf2YId8cAV+RSLs6vbm/gMlEQV0YE5LvfH4Ml537wfu5Wt4oOvrfpVrZ8Dp7RtYt29g",
+ "3b6SdPsG1u3u3r6BdftC1O0LUX/WF6JmWyVEVyVw55stumfapERCameuGXjYrPW6S98ryfSMkOOV4f/U",
+ "3AFwBpLmJKXKCkbcxj0XWBtRVWkKkB2c8KQFia2IaCa+2/zXqrkn1f7+IyD797p9rN0i4Lz9viiq4id0",
+ "NZHvycnkZNIbSUIhzsCVlMbmWYXhL7bXzmH/Vz3uz7K3dQXdWOPKipYlmGtNVYsFS5lFeS6MMrAUnWht",
+ "LvALSAOcrdFGmLbPaiE+McrdxcRQVwApJnT37/ej4O2YXQ/odCtp3WjtxT+ugL2NT/U37Pp44Naxewzx",
+ "lmXcBMv44kzjD/TCxu1jGl/ZgkJHauu1rKsU5ikhZQuWxuxOXkay5mTDm3EESCvJ9AZvOFqy307B/P+9",
+ "4eMK5Jm//CqZTw4mK63Lg709fM9yJZTem5irqfmmOh/N/UCXdgR3uZSSneFbOO8//f8AAAD//xjSW+CP",
+ "MgEA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
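[Editor's note] The base64 block above is the gzip-compressed, base64-encoded OpenAPI definition that the code generator embeds in the generated routes.go; regenerating the API rewrites the blob wholesale, which is why the diff replaces the entire string slice. A minimal sketch of how such an embedded spec can be recovered, assuming the generated slice keeps its conventional name swaggerSpec (an assumption, not shown in this hunk):

    package main

    import (
        "bytes"
        "compress/gzip"
        "encoding/base64"
        "fmt"
        "io"
        "strings"
    )

    // swaggerSpec stands in for the generated []string of base64 chunks above.
    var swaggerSpec = []string{ /* chunks as emitted in routes.go */ }

    // decodeSpec reverses the embedding: concatenate the chunks,
    // base64-decode, then gunzip back to the raw JSON spec.
    func decodeSpec(chunks []string) ([]byte, error) {
        raw, err := base64.StdEncoding.DecodeString(strings.Join(chunks, ""))
        if err != nil {
            return nil, err
        }
        zr, err := gzip.NewReader(bytes.NewReader(raw))
        if err != nil {
            return nil, err
        }
        defer zr.Close()
        return io.ReadAll(zr)
    }

    func main() {
        spec, err := decodeSpec(swaggerSpec)
        if err != nil {
            panic(err)
        }
        fmt.Printf("spec is %d bytes of JSON\n", len(spec))
    }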
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index c3d71a7aa..b52d1286f 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -626,6 +626,13 @@ type ApplicationResponse Application
// AssetResponse defines model for AssetResponse.
type AssetResponse Asset
+// BlockHashResponse defines model for BlockHashResponse.
+type BlockHashResponse struct {
+
+ // Block header hash.
+ BlockHash string `json:"blockHash"`
+}
+
// BlockResponse defines model for BlockResponse.
type BlockResponse struct {
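[Editor's note] BlockHashResponse is a single-field model, so the success body of the new endpoint is a one-key JSON object. The wire shape, with a placeholder value:

    // GET /v2/blocks/{round}/hash  ->  200
    //
    //   {"blockHash":"<base32 digest of the block header>"}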
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 5898c2835..dadaf5263 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -589,7 +589,12 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
if handle == protocol.CodecHandle {
blockbytes, err := rpcs.RawBlockBytes(v2.Node.LedgerForAPI(), basics.Round(round))
if err != nil {
- return internalError(ctx, err, err.Error(), v2.Log)
+ switch err.(type) {
+ case ledgercore.ErrNoEntry:
+ return notFound(ctx, err, errFailedLookingUpLedger, v2.Log)
+ default:
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
}
ctx.Response().Writer.Header().Add("X-Algorand-Struct", "block-v1")
@@ -599,7 +604,12 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
ledger := v2.Node.LedgerForAPI()
block, _, err := ledger.BlockCert(basics.Round(round))
if err != nil {
- return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ switch err.(type) {
+ case ledgercore.ErrNoEntry:
+ return notFound(ctx, err, errFailedLookingUpLedger, v2.Log)
+ default:
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
}
// Encoding wasn't working well without embedding "real" objects.
@@ -617,6 +627,25 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
return ctx.Blob(http.StatusOK, contentType, data)
}
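[Editor's note] The lookups in this file now map ledgercore.ErrNoEntry (the ledger's "no such round" error, a concrete struct type, hence the type switch) to 404 rather than a blanket 500. The shared shape, as a hypothetical helper sketching the classification rather than code from the diff:

    func ledgerLookupStatus(err error) int {
        switch err.(type) {
        case ledgercore.ErrNoEntry:
            return http.StatusNotFound // round not (yet) in the ledger
        default:
            return http.StatusInternalServerError // genuine failure
        }
    }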
+// GetBlockHash gets the block hash for the given round.
+// (GET /v2/blocks/{round}/hash)
+func (v2 *Handlers) GetBlockHash(ctx echo.Context, round uint64) error {
+ ledger := v2.Node.LedgerForAPI()
+ block, _, err := ledger.BlockCert(basics.Round(round))
+ if err != nil {
+ switch err.(type) {
+ case ledgercore.ErrNoEntry:
+ return notFound(ctx, err, errFailedLookingUpLedger, v2.Log)
+ default:
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+ }
+
+ response := generated.BlockHashResponse{BlockHash: crypto.Digest(block.Hash()).String()}
+
+ return ctx.JSON(http.StatusOK, response)
+}
+
// GetTransactionProof generates a Merkle proof for a transaction in a block.
// (GET /v2/blocks/{round}/transactions/{txid}/proof)
func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid string, params generated.GetTransactionProofParams) error {
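[Editor's note] Client-side usage of the new hash endpoint, as a hedged sketch: the address, round, and token are placeholders, and algod is assumed to take its credential in the X-Algo-API-Token header.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
    )

    // blockHashResponse mirrors generated.BlockHashResponse.
    type blockHashResponse struct {
        BlockHash string `json:"blockHash"`
    }

    func main() {
        const apiToken = "<algod token>" // placeholder credential
        req, err := http.NewRequest("GET", "http://localhost:8080/v2/blocks/1000/hash", nil)
        if err != nil {
            log.Fatal(err)
        }
        req.Header.Set("X-Algo-API-Token", apiToken)
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        var bh blockHashResponse
        if err := json.NewDecoder(resp.Body).Decode(&bh); err != nil {
            log.Fatal(err)
        }
        fmt.Println(bh.BlockHash) // bare base32 digest of the round-1000 header
    }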
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index ad3d2ee69..588743a59 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -122,7 +122,8 @@ func TestGetBlock(t *testing.T) {
getBlockTest(t, 0, "json", 200)
getBlockTest(t, 0, "msgpack", 200)
- getBlockTest(t, 1, "json", 500)
+ getBlockTest(t, 1, "json", 404)
+ getBlockTest(t, 1, "msgpack", 404)
getBlockTest(t, 0, "bad format", 400)
}
@@ -207,6 +208,74 @@ func addBlockHelper(t *testing.T) (v2.Handlers, echo.Context, *httptest.Response
return handler, c, rec, stx, releasefunc
}
+func TestGetBlockHash(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ err := handler.GetBlockHash(c, 0)
+ require.NoError(t, err)
+ require.Equal(t, 200, rec.Code)
+
+ c, rec = newReq(t)
+ err = handler.GetBlockHash(c, 1)
+ require.NoError(t, err)
+ require.Equal(t, 404, rec.Code)
+}
+
+func TestGetBlockGetBlockHash(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ a := require.New(t)
+
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+ insertRounds(a, handler, 2)
+
+ type blockResponse struct {
+ Block bookkeeping.Block `codec:"block"`
+ }
+
+ var block1, block2 blockResponse
+ var block1Hash generatedV2.BlockHashResponse
+ format := "json"
+
+ // Get block 1
+ err := handler.GetBlock(c, 1, generatedV2.GetBlockParams{Format: &format})
+ a.NoError(err)
+ a.Equal(200, rec.Code)
+ err = protocol.DecodeJSON(rec.Body.Bytes(), &block1)
+ a.NoError(err)
+
+ // Get block 2
+ c, rec = newReq(t)
+ err = handler.GetBlock(c, 2, generatedV2.GetBlockParams{Format: &format})
+ a.NoError(err)
+ a.Equal(200, rec.Code)
+ err = protocol.DecodeJSON(rec.Body.Bytes(), &block2)
+ a.NoError(err)
+
+ // Get block 1 hash
+ c, rec = newReq(t)
+ err = handler.GetBlockHash(c, 1)
+ a.NoError(err)
+ a.Equal(200, rec.Code)
+ err = protocol.DecodeJSON(rec.Body.Bytes(), &block1Hash)
+ a.NoError(err)
+
+ // Validate that the block returned from GetBlock(1) has the same hash as the one returned via GetBlockHash(1)
+ a.Equal(crypto.HashObj(block1.Block.BlockHeader).String(), block1Hash.BlockHash)
+
+ // Validate that the prev-hash of the block returned from GetBlock(2) matches the hash returned via GetBlockHash(1)
+ hash := block2.Block.Branch.String()
+ a.Equal(fmt.Sprintf("blk-%s", block1Hash.BlockHash), hash)
+
+ // Sanity check that the hashes are not equal (i.e. they are not the default values)
+ a.NotEqual(block1.Block.Branch, block2.Block.Branch)
+}
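[Editor's note] The second test pins down two facts worth spelling out: the hash served for round r is exactly round r+1's Branch (previous-block pointer), and the Branch type's String() prefixes the digest with "blk-" while the REST field carries the bare digest. In isolation:

    // From the assertions above (the digest is whatever round 1's header hashes to):
    //   GetBlockHash(1)           -> {"blockHash": "<digest>"}
    //   GetBlock(2) block.Branch  -> "blk-<digest>" when stringified
    a.Equal("blk-"+block1Hash.BlockHash, block2.Block.Branch.String())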
+
func TestGetBlockJsonEncoding(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index c423e8de2..4f1ce3515 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // net/http/pprof is for registering the pprof URLs with the web server, so http://localhost:8080/debug/pprof/ works.
@@ -264,13 +263,25 @@ func (s *Server) Start() {
// quit earlier than these service files get created
s.pidFile = filepath.Join(s.RootPath, "algod.pid")
s.netFile = filepath.Join(s.RootPath, "algod.net")
- ioutil.WriteFile(s.pidFile, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0644)
- ioutil.WriteFile(s.netFile, []byte(fmt.Sprintf("%s\n", addr)), 0644)
+ err = os.WriteFile(s.pidFile, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0644)
+ if err != nil {
+ fmt.Printf("pidfile error: %v\n", err)
+ os.Exit(1)
+ }
+ err = os.WriteFile(s.netFile, []byte(fmt.Sprintf("%s\n", addr)), 0644)
+ if err != nil {
+ fmt.Printf("netfile error: %v\n", err)
+ os.Exit(1)
+ }
listenAddr, listening := s.node.ListeningAddress()
if listening {
s.netListenFile = filepath.Join(s.RootPath, "algod-listen.net")
- ioutil.WriteFile(s.netListenFile, []byte(fmt.Sprintf("%s\n", listenAddr)), 0644)
+ err = os.WriteFile(s.netListenFile, []byte(fmt.Sprintf("%s\n", listenAddr)), 0644)
+ if err != nil {
+ fmt.Printf("netlistenfile error: %v\n", err)
+ os.Exit(1)
+ }
}
errChan := make(chan error, 1)
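[Editor's note] io/ioutil was deprecated in Go 1.16, with WriteFile moving to the os package; beyond the mechanical swap, these hunks stop discarding write errors, so algod now refuses to start if it cannot record its pid/net files. The pattern, as a sketch (the helper is an illustration, not code from the diff):

    func mustWriteStateFile(label, path, contents string) {
        if err := os.WriteFile(path, []byte(contents), 0644); err != nil {
            fmt.Printf("%s error: %v\n", label, err)
            os.Exit(1)
        }
    }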
diff --git a/daemon/kmd/config/config.go b/daemon/kmd/config/config.go
index 9b932d216..95a03fe0d 100644
--- a/daemon/kmd/config/config.go
+++ b/daemon/kmd/config/config.go
@@ -18,7 +18,7 @@ package config
import (
"encoding/json"
- "io/ioutil"
+ "os"
"path/filepath"
"github.com/algorand/go-algorand/util/codecs"
@@ -103,7 +103,7 @@ func (k KMDConfig) Validate() error {
func LoadKMDConfig(dataDir string) (cfg KMDConfig, err error) {
cfg = defaultConfig(dataDir)
configFilename := filepath.Join(dataDir, kmdConfigFilename)
- dat, err := ioutil.ReadFile(configFilename)
+ dat, err := os.ReadFile(configFilename)
// If there is no config file, then return the default configuration, and dump the default config to disk
if err != nil {
exampleFilename := filepath.Join(dataDir, kmdConfigExampleFilename)
diff --git a/daemon/kmd/server/server.go b/daemon/kmd/server/server.go
index 973df186b..b36c2859c 100644
--- a/daemon/kmd/server/server.go
+++ b/daemon/kmd/server/server.go
@@ -19,7 +19,6 @@ package server
import (
"context"
"fmt"
- "io/ioutil"
"net"
"net/http"
"os"
@@ -144,12 +143,12 @@ func (ws *WalletServer) releaseFileLock() error {
// Write out a file containing the address kmd is listening on
func (ws *WalletServer) writeStateFiles(netAddr string) (err error) {
// netPath file contains path to sock file
- err = ioutil.WriteFile(ws.netPath, []byte(netAddr), 0640)
+ err = os.WriteFile(ws.netPath, []byte(netAddr), 0640)
if err != nil {
return
}
// pidPath file contains current process ID
- err = ioutil.WriteFile(ws.pidPath, []byte(fmt.Sprintf("%d", os.Getpid())), 0640)
+ err = os.WriteFile(ws.pidPath, []byte(fmt.Sprintf("%d", os.Getpid())), 0640)
return
}
diff --git a/daemon/kmd/wallet/driver/sqlite.go b/daemon/kmd/wallet/driver/sqlite.go
index eb78f4a77..8ad659c28 100644
--- a/daemon/kmd/wallet/driver/sqlite.go
+++ b/daemon/kmd/wallet/driver/sqlite.go
@@ -20,7 +20,6 @@ import (
"bytes"
"crypto/subtle"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -231,7 +230,7 @@ func walletMetadataFromDBPath(dbPath string) (metadata wallet.Metadata, err erro
func (swd *SQLiteWalletDriver) potentialWalletPaths() (paths []string, err error) {
// List all files and folders in the wallets directory
wDir := swd.walletsDir()
- files, err := ioutil.ReadDir(wDir)
+ files, err := os.ReadDir(wDir)
if err != nil {
return
}
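[Editor's note] One behavioral nuance in this last swap: ioutil.ReadDir returned []fs.FileInfo, while os.ReadDir returns []fs.DirEntry, still sorted by filename but with the stat deferred. Callers that need size or mode must now go through Info():

    entries, err := os.ReadDir(wDir)
    if err != nil {
        return
    }
    for _, e := range entries {
        // Name and IsDir are free; a full FileInfo costs an extra lstat.
        if info, err := e.Info(); err == nil {
            _ = info.Mode()
        }
    }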
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
deleted file mode 100644
index b0f8b6c12..000000000
--- a/data/abi/abi_encode.go
+++ /dev/null
@@ -1,617 +0,0 @@
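[Editor's note] The ABI encoder is deleted outright in this release. Nothing in the diff names a successor, but the timing is consistent with the implementation having been extracted into the standalone github.com/algorand/avm-abi module (an inference, not something this diff states); consumers would switch imports rather than lose the functionality. A hedged sketch, assuming the extracted package keeps the surface shown in the deleted file below:

    package main

    import (
        "fmt"

        "github.com/algorand/avm-abi/abi" // assumed new home of this code
    )

    func main() {
        t, err := abi.TypeOf("(uint64,string)")
        if err != nil {
            panic(err)
        }
        enc, err := t.Encode([]interface{}{uint64(7), "algo"})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", enc)
    }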
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "encoding/binary"
- "encoding/json"
- "fmt"
- "math/big"
- "reflect"
- "strings"
-)
-
-// typeCastToTuple cast an array-like ABI type into an ABI tuple type.
-func (t Type) typeCastToTuple(tupLen ...int) (Type, error) {
- var childT []Type
-
- switch t.abiTypeID {
- case String:
- if len(tupLen) != 1 {
- return Type{}, fmt.Errorf("string type conversion to tuple need 1 length argument")
- }
- childT = make([]Type, tupLen[0])
- for i := 0; i < tupLen[0]; i++ {
- childT[i] = byteType
- }
- case Address:
- childT = make([]Type, addressByteSize)
- for i := 0; i < addressByteSize; i++ {
- childT[i] = byteType
- }
- case ArrayStatic:
- childT = make([]Type, t.staticLength)
- for i := 0; i < int(t.staticLength); i++ {
- childT[i] = t.childTypes[0]
- }
- case ArrayDynamic:
- if len(tupLen) != 1 {
- return Type{}, fmt.Errorf("dynamic array type conversion to tuple need 1 length argument")
- }
- childT = make([]Type, tupLen[0])
- for i := 0; i < tupLen[0]; i++ {
- childT[i] = t.childTypes[0]
- }
- default:
- return Type{}, fmt.Errorf("type cannot support conversion to tuple")
- }
-
- tuple, err := MakeTupleType(childT)
- if err != nil {
- return Type{}, err
- }
- return tuple, nil
-}
-
-// Encode is an ABI type method to encode go values into bytes following ABI encoding rules
-func (t Type) Encode(value interface{}) ([]byte, error) {
- switch t.abiTypeID {
- case Uint, Ufixed:
- return encodeInt(value, t.bitSize)
- case Bool:
- boolValue, ok := value.(bool)
- if !ok {
- return nil, fmt.Errorf("cannot cast value to bool in bool encoding")
- }
- if boolValue {
- return []byte{0x80}, nil
- }
- return []byte{0x00}, nil
- case Byte:
- byteValue, ok := value.(byte)
- if !ok {
- return nil, fmt.Errorf("cannot cast value to byte in byte encoding")
- }
- return []byte{byteValue}, nil
- case ArrayStatic, Address:
- castedType, err := t.typeCastToTuple()
- if err != nil {
- return nil, err
- }
- return castedType.Encode(value)
- case ArrayDynamic:
- dynamicArray, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- castedType, err := t.typeCastToTuple(len(dynamicArray))
- if err != nil {
- return nil, err
- }
- lengthEncode := make([]byte, lengthEncodeByteSize)
- binary.BigEndian.PutUint16(lengthEncode, uint16(len(dynamicArray)))
- encoded, err := castedType.Encode(value)
- if err != nil {
- return nil, err
- }
- encoded = append(lengthEncode, encoded...)
- return encoded, nil
- case String:
- stringValue, okString := value.(string)
- if !okString {
- return nil, fmt.Errorf("cannot cast value to string or array dynamic in encoding")
- }
- byteValue := []byte(stringValue)
- castedType, err := t.typeCastToTuple(len(byteValue))
- if err != nil {
- return nil, err
- }
- lengthEncode := make([]byte, lengthEncodeByteSize)
- binary.BigEndian.PutUint16(lengthEncode, uint16(len(byteValue)))
- encoded, err := castedType.Encode(byteValue)
- if err != nil {
- return nil, err
- }
- encoded = append(lengthEncode, encoded...)
- return encoded, nil
- case Tuple:
- return encodeTuple(value, t.childTypes)
- default:
- return nil, fmt.Errorf("cannot infer type for encoding")
- }
-}
-
-// encodeInt encodes int-alike golang values to bytes, following ABI encoding rules
-func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
- var bigInt *big.Int
-
- switch intValue := intValue.(type) {
- case int8:
- bigInt = big.NewInt(int64(intValue))
- case uint8:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int16:
- bigInt = big.NewInt(int64(intValue))
- case uint16:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int32:
- bigInt = big.NewInt(int64(intValue))
- case uint32:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int64:
- bigInt = big.NewInt(intValue)
- case uint64:
- bigInt = new(big.Int).SetUint64(intValue)
- case uint:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int:
- bigInt = big.NewInt(int64(intValue))
- case *big.Int:
- bigInt = intValue
- default:
- return nil, fmt.Errorf("cannot infer go type for uint encode")
- }
-
- if bigInt.Sign() < 0 {
- return nil, fmt.Errorf("passed in numeric value should be non negative")
- }
-
- castedBytes := make([]byte, bitSize/8)
-
- if bigInt.Cmp(new(big.Int).Lsh(big.NewInt(1), uint(bitSize))) >= 0 {
- return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", bigInt.BitLen(), bitSize)
- }
-
- bigInt.FillBytes(castedBytes)
- return castedBytes, nil
-}
-
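[Editor's note] A concrete instance of the rule encodeInt implements: big-endian, zero-padded to bitSize/8 bytes, with negatives and overflow rejected. Sketch using math/big directly:

    // Encoding 1000 as a uint32 (bitSize 32 -> 4 bytes):
    v := big.NewInt(1000)      // 0x3E8
    buf := make([]byte, 32/8)
    v.FillBytes(buf)           // [0x00 0x00 0x03 0xE8]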
-// inferToSlice infers an interface element to a slice of interface{}, returns error if it cannot infer successfully
-func inferToSlice(value interface{}) ([]interface{}, error) {
- reflectVal := reflect.ValueOf(value)
- if reflectVal.Kind() != reflect.Slice && reflectVal.Kind() != reflect.Array {
- return nil, fmt.Errorf("cannot infer an interface value as a slice of interface element")
- }
- // * if input is a slice, with nil, then reflectVal.Len() == 0
- // * if input is an array, it is not possible it is nil
- values := make([]interface{}, reflectVal.Len())
- for i := 0; i < reflectVal.Len(); i++ {
- values[i] = reflectVal.Index(i).Interface()
- }
- return values, nil
-}
-
-// encodeTuple encodes slice-of-interface of golang values to bytes, following ABI encoding rules
-func encodeTuple(value interface{}, childT []Type) ([]byte, error) {
- if len(childT) >= abiEncodingLengthLimit {
- return nil, fmt.Errorf("abi child type number exceeds uint16 maximum")
- }
- values, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- if len(values) != len(childT) {
- return nil, fmt.Errorf("cannot encode abi tuple: value slice length != child type number")
- }
-
- // for each tuple element value, it has a head/tail component
- // we create slots for head/tail bytes now, store them and concat them later
- heads := make([][]byte, len(childT))
- tails := make([][]byte, len(childT))
- isDynamicIndex := make(map[int]bool)
-
- for i := 0; i < len(childT); i++ {
- if childT[i].IsDynamic() {
- // if it is a dynamic value, the head component is not pre-determined
- // we store an empty placeholder first, since we will need it in byte length calculation
- headsPlaceholder := []byte{0x00, 0x00}
- heads[i] = headsPlaceholder
- // we keep track that the index points to a dynamic value
- isDynamicIndex[i] = true
- tailEncoding, err := childT[i].Encode(values[i])
- if err != nil {
- return nil, err
- }
- tails[i] = tailEncoding
- isDynamicIndex[i] = true
- } else if childT[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(childT, i, -1)
- // search after bool
- after := findBoolLR(childT, i, 1)
- // append to heads and tails
- if before%8 != 0 {
- return nil, fmt.Errorf("cannot encode abi tuple: expected before has number of bool mod 8 == 0")
- }
- if after > 7 {
- after = 7
- }
- compressed, err := compressBools(values[i : i+after+1])
- if err != nil {
- return nil, err
- }
- heads[i] = []byte{compressed}
- i += after
- isDynamicIndex[i] = false
- } else {
- encodeTi, err := childT[i].Encode(values[i])
- if err != nil {
- return nil, err
- }
- heads[i] = encodeTi
- isDynamicIndex[i] = false
- }
- }
-
- // adjust heads for dynamic type
- // since head size can be pre-determined (for we are storing static value and dynamic value index in head)
- // we accumulate the head size first
- // (also note that though head size is pre-determined, head value is not necessarily pre-determined)
- headLength := 0
- for _, headTi := range heads {
- headLength += len(headTi)
- }
-
- // when we iterate through the heads (byte slice), we need to find heads for dynamic values
- // the head should correspond to the start index: len( head(x[1]) ... head(x[N]) tail(x[1]) ... tail(x[i-1]) ).
- tailCurrLength := 0
- for i := 0; i < len(heads); i++ {
- if isDynamicIndex[i] {
- // calculate where the index of dynamic value encoding byte start
- headValue := headLength + tailCurrLength
- if headValue >= abiEncodingLengthLimit {
- return nil, fmt.Errorf("cannot encode abi tuple: encode length exceeds uint16 maximum")
- }
- binary.BigEndian.PutUint16(heads[i], uint16(headValue))
- }
- // accumulate the current tailing dynamic encoding bytes length.
- tailCurrLength += len(tails[i])
- }
-
- // concat everything as the abi encoded bytes
- encoded := make([]byte, 0, headLength+tailCurrLength)
- for _, head := range heads {
- encoded = append(encoded, head...)
- }
- for _, tail := range tails {
- encoded = append(encoded, tail...)
- }
- return encoded, nil
-}
-
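[Editor's note] To make the head/tail bookkeeping concrete, encode the tuple (uint16,string) with value (5, "hi"): two heads of 2 bytes each mean the dynamic tail starts at offset 4.

    // head(uint16)  = 00 05
    // head(string)  = 00 04          offset: 4 head bytes precede the tail
    // tail(string)  = 00 02 68 69    2-byte length, then "hi"
    //
    // full encoding:  00 05 00 04 00 02 68 69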
-// compressBools takes a slice of interface{} (which can be casted to bools) length <= 8
-// and compress the bool values into a uint8 integer
-func compressBools(boolSlice []interface{}) (uint8, error) {
- var res uint8 = 0
- if len(boolSlice) > 8 {
- return 0, fmt.Errorf("compressBools: cannot have slice length > 8")
- }
- for i := 0; i < len(boolSlice); i++ {
- temp, ok := boolSlice[i].(bool)
- if !ok {
- return 0, fmt.Errorf("compressBools: cannot cast slice element to bool")
- }
- if temp {
- res |= 1 << uint(7-i)
- }
- }
- return res, nil
-}
-
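[Editor's note] The packing is MSB-first; a worked example:

    // compressBools([]interface{}{true, false, true})
    //   bit 7 set (index 0), bit 5 set (index 2)
    //   => 0b1010_0000 == 0xA0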
-// decodeUint decodes byte slice into golang int/big.Int
-func decodeUint(encoded []byte, bitSize uint16) (interface{}, error) {
- if len(encoded) != int(bitSize)/8 {
- return nil,
- fmt.Errorf("uint/ufixed decode: expected byte length %d, but got byte length %d", bitSize/8, len(encoded))
- }
- switch bitSize / 8 {
- case 1:
- return encoded[0], nil
- case 2:
- return uint16(new(big.Int).SetBytes(encoded).Uint64()), nil
- case 3, 4:
- return uint32(new(big.Int).SetBytes(encoded).Uint64()), nil
- case 5, 6, 7, 8:
- return new(big.Int).SetBytes(encoded).Uint64(), nil
- default:
- return new(big.Int).SetBytes(encoded), nil
- }
-}
-
-// Decode is an ABI type method to decode bytes to go values from ABI encoding rules
-func (t Type) Decode(encoded []byte) (interface{}, error) {
- switch t.abiTypeID {
- case Uint, Ufixed:
- return decodeUint(encoded, t.bitSize)
- case Bool:
- if len(encoded) != 1 {
- return nil, fmt.Errorf("boolean byte should be length 1 byte")
- }
- if encoded[0] == 0x00 {
- return false, nil
- } else if encoded[0] == 0x80 {
- return true, nil
- }
- return nil, fmt.Errorf("single boolean encoded byte should be of form 0x80 or 0x00")
- case Byte:
- if len(encoded) != 1 {
- return nil, fmt.Errorf("byte should be length 1")
- }
- return encoded[0], nil
- case ArrayStatic:
- castedType, err := t.typeCastToTuple()
- if err != nil {
- return nil, err
- }
- return castedType.Decode(encoded)
- case Address:
- if len(encoded) != addressByteSize {
- return nil, fmt.Errorf("address should be length 32")
- }
- return encoded, nil
- case ArrayDynamic:
- if len(encoded) < lengthEncodeByteSize {
- return nil, fmt.Errorf("dynamic array format corrupted")
- }
- dynamicLen := binary.BigEndian.Uint16(encoded[:lengthEncodeByteSize])
- castedType, err := t.typeCastToTuple(int(dynamicLen))
- if err != nil {
- return nil, err
- }
- return castedType.Decode(encoded[lengthEncodeByteSize:])
- case String:
- if len(encoded) < lengthEncodeByteSize {
- return nil, fmt.Errorf("string format corrupted")
- }
- stringLenBytes := encoded[:lengthEncodeByteSize]
- byteLen := binary.BigEndian.Uint16(stringLenBytes)
- if len(encoded[lengthEncodeByteSize:]) != int(byteLen) {
- return nil, fmt.Errorf("string representation in byte: length not matching")
- }
- return string(encoded[lengthEncodeByteSize:]), nil
- case Tuple:
- return decodeTuple(encoded, t.childTypes)
- default:
- return nil, fmt.Errorf("cannot infer type for decoding")
- }
-}
-
-// decodeTuple decodes byte slice with ABI type slice, outputting a slice of golang interface values
-// following ABI encoding rules
-func decodeTuple(encoded []byte, childT []Type) ([]interface{}, error) {
- dynamicSegments := make([]int, 0, len(childT)+1)
- valuePartition := make([][]byte, 0, len(childT))
- iterIndex := 0
-
- for i := 0; i < len(childT); i++ {
- if childT[i].IsDynamic() {
- if len(encoded[iterIndex:]) < lengthEncodeByteSize {
- return nil, fmt.Errorf("ill formed tuple dynamic typed value encoding")
- }
- dynamicIndex := binary.BigEndian.Uint16(encoded[iterIndex : iterIndex+lengthEncodeByteSize])
- dynamicSegments = append(dynamicSegments, int(dynamicIndex))
- valuePartition = append(valuePartition, nil)
- iterIndex += lengthEncodeByteSize
- } else if childT[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(childT, i, -1)
- // search after bool
- after := findBoolLR(childT, i, 1)
- if before%8 == 0 {
- if after > 7 {
- after = 7
- }
- // parse bool in a byte to multiple byte strings
- for boolIndex := uint(0); boolIndex <= uint(after); boolIndex++ {
- boolMask := 0x80 >> boolIndex
- if encoded[iterIndex]&byte(boolMask) > 0 {
- valuePartition = append(valuePartition, []byte{0x80})
- } else {
- valuePartition = append(valuePartition, []byte{0x00})
- }
- }
- i += after
- iterIndex++
- } else {
- return nil, fmt.Errorf("expected before bool number mod 8 == 0")
- }
- } else {
- // not bool ...
- currLen, err := childT[i].ByteLen()
- if err != nil {
- return nil, err
- }
- valuePartition = append(valuePartition, encoded[iterIndex:iterIndex+currLen])
- iterIndex += currLen
- }
- if i != len(childT)-1 && iterIndex >= len(encoded) {
- return nil, fmt.Errorf("input byte not enough to decode")
- }
- }
-
- if len(dynamicSegments) > 0 {
- dynamicSegments = append(dynamicSegments, len(encoded))
- iterIndex = len(encoded)
- }
- if iterIndex < len(encoded) {
- return nil, fmt.Errorf("input byte not fully consumed")
- }
- for i := 0; i < len(dynamicSegments)-1; i++ {
- if dynamicSegments[i] > dynamicSegments[i+1] {
- return nil, fmt.Errorf("dynamic segment should display a [l, r] space with l <= r")
- }
- }
-
- segIndex := 0
- for i := 0; i < len(childT); i++ {
- if childT[i].IsDynamic() {
- valuePartition[i] = encoded[dynamicSegments[segIndex]:dynamicSegments[segIndex+1]]
- segIndex++
- }
- }
-
- values := make([]interface{}, len(childT))
- for i := 0; i < len(childT); i++ {
- var err error
- values[i], err = childT[i].Decode(valuePartition[i])
- if err != nil {
- return nil, err
- }
- }
- return values, nil
-}
-
-// maxAppArgs is the maximum number of arguments for an application call transaction, in compliance
-// with ARC-4. Currently this is the same as the MaxAppArgs consensus parameter, but the
-// difference is that the consensus parameter is liable to change in a future consensus upgrade.
-// However, the ARC-4 ABI argument encoding **MUST** always remain the same.
-const maxAppArgs = 16
-
-// The tuple threshold is maxAppArgs, minus 1 for the method selector in the first app arg,
-// minus 1 for the final app argument becoming a tuple of the remaining method args
-const methodArgsTupleThreshold = maxAppArgs - 2
-
-// ParseArgJSONtoByteSlice convert input method arguments to ABI encoded bytes
-// it converts funcArgTypes into a tuple type and apply changes over input argument string (in JSON format)
-// if there are greater or equal to 15 inputs, then we compact the tailing inputs into one tuple
-func ParseArgJSONtoByteSlice(argTypes []string, jsonArgs []string, applicationArgs *[][]byte) error {
- abiTypes := make([]Type, len(argTypes))
- for i, typeString := range argTypes {
- abiType, err := TypeOf(typeString)
- if err != nil {
- return err
- }
- abiTypes[i] = abiType
- }
-
- if len(abiTypes) != len(jsonArgs) {
- return fmt.Errorf("input argument number %d != method argument number %d", len(jsonArgs), len(abiTypes))
- }
-
- // Up to 16 app arguments can be passed to app call. First is reserved for method selector,
- // and the rest are for method call arguments. But if more than 15 method call arguments
- // are present, then the method arguments after the 14th are placed in a tuple in the last
- // app argument slot
- if len(abiTypes) > maxAppArgs-1 {
- typesForTuple := make([]Type, len(abiTypes)-methodArgsTupleThreshold)
- copy(typesForTuple, abiTypes[methodArgsTupleThreshold:])
-
- compactedType, err := MakeTupleType(typesForTuple)
- if err != nil {
- return err
- }
-
- abiTypes = append(abiTypes[:methodArgsTupleThreshold], compactedType)
-
- tupleValues := make([]json.RawMessage, len(jsonArgs)-methodArgsTupleThreshold)
- for i, jsonArg := range jsonArgs[methodArgsTupleThreshold:] {
- tupleValues[i] = []byte(jsonArg)
- }
-
- remainingJSON, err := json.Marshal(tupleValues)
- if err != nil {
- return err
- }
-
- jsonArgs = append(jsonArgs[:methodArgsTupleThreshold], string(remainingJSON))
- }
-
- // parse JSON value to ABI encoded bytes
- for i := 0; i < len(jsonArgs); i++ {
- interfaceVal, err := abiTypes[i].UnmarshalFromJSON([]byte(jsonArgs[i]))
- if err != nil {
- return err
- }
- abiEncoded, err := abiTypes[i].Encode(interfaceVal)
- if err != nil {
- return err
- }
- *applicationArgs = append(*applicationArgs, abiEncoded)
- }
- return nil
-}
-
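[Editor's note] A worked instance of the compaction rule: with exactly 16 method arguments, the app args hold the 4-byte method selector, arguments 1-14 individually, and arguments 15-16 folded into a single tuple in the sixteenth slot.

    // methodArgsTupleThreshold = 16 - 2 = 14
    //
    // method args:  a1 ... a14, a15, a16        (len 16 > maxAppArgs-1)
    // app args:     [selector,
    //                enc(a1), ..., enc(a14),
    //                enc((a15, a16))]           // abiTypes[14:] as one tuple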
-// ParseMethodSignature parses a method of format `method(argType1,argType2,...)retType`
-// into `method` {`argType1`,`argType2`,...} and `retType`
-func ParseMethodSignature(methodSig string) (name string, argTypes []string, returnType string, err error) {
- argsStart := strings.Index(methodSig, "(")
- if argsStart == -1 {
- err = fmt.Errorf(`No parenthesis in method signature: "%s"`, methodSig)
- return
- }
-
- if argsStart == 0 {
- err = fmt.Errorf(`Method signature has no name: "%s"`, methodSig)
- return
- }
-
- argsEnd := -1
- depth := 0
- for index, char := range methodSig {
- if char == '(' {
- depth++
- } else if char == ')' {
- if depth == 0 {
- err = fmt.Errorf(`Unpaired parenthesis in method signature: "%s"`, methodSig)
- return
- }
- depth--
- if depth == 0 {
- argsEnd = index
- break
- }
- }
- }
-
- if argsEnd == -1 {
- err = fmt.Errorf(`Unpaired parenthesis in method signature: "%s"`, methodSig)
- return
- }
-
- name = methodSig[:argsStart]
- argTypes, err = parseTupleContent(methodSig[argsStart+1 : argsEnd])
- returnType = methodSig[argsEnd+1:]
- return
-}
-
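[Editor's note] For example, the depth counter keeps nested tuples intact:

    // ParseMethodSignature(`add(uint64,(string,bool[]))uint64`) returns:
    //   name       = "add"
    //   argTypes   = ["uint64", "(string,bool[])"]
    //   returnType = "uint64"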
-// VerifyMethodSignature checks if a method signature and its referenced types can be parsed properly
-func VerifyMethodSignature(methodSig string) error {
- _, argTypes, retType, err := ParseMethodSignature(methodSig)
- if err != nil {
- return err
- }
-
- for i, argType := range argTypes {
- if IsReferenceType(argType) || IsTransactionType(argType) {
- continue
- }
-
- _, err = TypeOf(argType)
- if err != nil {
- return fmt.Errorf("Error parsing argument type at index %d: %s", i, err.Error())
- }
- }
-
- if retType != VoidReturnType {
- _, err = TypeOf(retType)
- if err != nil {
- return fmt.Errorf("Error parsing return type: %s", err.Error())
- }
- }
-
- return nil
-}
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
deleted file mode 100644
index 231c1a0e0..000000000
--- a/data/abi/abi_encode_test.go
+++ /dev/null
@@ -1,1279 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/big"
- "testing"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/chrismcguire/gobberish"
- "github.com/stretchr/testify/require"
-)
-
-const (
- uintStepLength = 8
- uintBegin = 8
- uintEnd = 512
- uintRandomTestPoints = 1000
- uintTestCaseCount = 200
- ufixedPrecision = 160
- ufixedRandomTestPoints = 20
- tupleMaxLength = 10
- byteTestCaseCount = 1 << 8
- boolTestCaseCount = 2
- addressTestCaseCount = 300
- stringTestCaseCount = 10
- stringTestCaseSpecLenCount = 5
- takeNum = 10
- tupleTestCaseCount = 100
-)
-
-/*
- The set of parameters ensure that the error of byte length >= 2^16 is eliminated.
-
- i. Consider uint512[] with length 10, the ABI encoding length is: 64 x 10 + 2
- (2 is introduced from dynamic array length encoding)
- The motivation here is that, forall ABI type that is non-array/non-tuple like,
- uint512 gives the longest byte length in ABI encoding
- (utf-8 string's byte length is at most 42, address byte length is at most 32)
-
- ii. Consider a tuple of length 10, with all elements uint512[] of length 10.
- The ABI encoding length is: 10 x 2 + 10 x 642 == 6440
- (2 is for tuple index to keep track of dynamic type encoding)
-
- iii. Consider a tuple of length 10, with all elements of tuples mentioned in (ii).
- The ABI encoding length is: 10 x 2 + 10 x 6440 == 64420
- This is the end of the generation of nested-tuple test case,
- no more layers of random tuples will be produced.
-
- This gives an upper bound for the produced ABI encoding byte length in this test script,
- and noticing that length 64420 mentioned in (iii) is less than 2^16 == 65536.
- Assuming that ABI implementation is correct, then the flaky test should not happen again.
-*/
-
-func TestEncodeValid(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // encoding test for uint type, iterating through all uint sizes
- // randomly pick 1000 valid uint values and check if encoded value match with expected
- for intSize := uintBegin; intSize <= uintEnd; intSize += uintStepLength {
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(intSize))
- uintType, err := makeUintType(intSize)
- require.NoError(t, err, "make uint type fail")
-
- for i := 0; i < uintRandomTestPoints; i++ {
- randomInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- expected := make([]byte, intSize/8)
- randomInt.FillBytes(expected)
-
- uintEncode, err := uintType.Encode(randomInt)
- require.NoError(t, err, "encoding from uint type fail")
-
- require.Equal(t, expected, uintEncode, "encode uint not match with expected")
- }
- // 2^[bitSize] - 1 test
- // check if uint<bitSize> can contain max uint value (2^bitSize - 1)
- largest := new(big.Int).Add(
- upperLimit,
- new(big.Int).Neg(big.NewInt(1)),
- )
- encoded, err := uintType.Encode(largest)
- require.NoError(t, err, "largest uint encode error")
-		require.Equal(t, largest.Bytes(), encoded, "largest uint encoding does not match expected")
- }
-
-	// encoding test for ufixed, iterating through all valid ufixed bitSize and precision combinations
-	// randomly generate 20 big int values for the ufixed numerator and check that each encoding matches the expected bytes
-	// also check that ufixed can fit the max numerator (2^bitSize - 1) for the given bitSize
- for size := uintBegin; size <= uintEnd; size += uintStepLength {
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(size))
- largest := big.NewInt(0).Add(
- upperLimit,
- new(big.Int).Neg(big.NewInt(1)),
- )
- for precision := 1; precision <= ufixedPrecision; precision++ {
- typeUfixed, err := makeUfixedType(size, precision)
- require.NoError(t, err, "make ufixed type fail")
-
- for i := 0; i < ufixedRandomTestPoints; i++ {
- randomInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- encodedUfixed, err := typeUfixed.Encode(randomInt)
- require.NoError(t, err, "ufixed encode fail")
-
- expected := make([]byte, size/8)
- randomInt.FillBytes(expected)
- require.Equal(t, expected, encodedUfixed, "encode ufixed not match with expected")
- }
- // (2^[bitSize] - 1) / (10^[precision]) test
- ufixedLargestEncode, err := typeUfixed.Encode(largest)
- require.NoError(t, err, "largest ufixed encode error")
- require.Equal(t, largest.Bytes(), ufixedLargestEncode,
-				"largest ufixed encoding does not match expected")
- }
- }
-
-	// encoding test for address; since an address is 32 bytes, it can be treated as a uint256
-	// randomly generate 1000 uint256 values, build addresses from them, and check that each encoding matches the expected bytes
- upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- for i := 0; i < uintRandomTestPoints; i++ {
- randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- addrBytesExpected := make([]byte, addressByteSize)
- randomAddrInt.FillBytes(addrBytesExpected)
-
- addrBytesActual, err := addressType.Encode(addrBytesExpected)
- require.NoError(t, err, "address encode fail")
- require.Equal(t, addrBytesExpected, addrBytesActual, "encode addr not match with expected")
- }
-
- // encoding test for bool values
- for i := 0; i < boolTestCaseCount; i++ {
- boolEncode, err := boolType.Encode(i == 1)
- require.NoError(t, err, "bool encode fail")
- expected := []byte{0x00}
- if i == 1 {
- expected = []byte{0x80}
- }
- require.Equal(t, expected, boolEncode, "encode bool not match with expected")
- }
-
- // encoding test for byte values
- for i := 0; i < byteTestCaseCount; i++ {
- byteEncode, err := byteType.Encode(byte(i))
- require.NoError(t, err, "byte encode fail")
- expected := []byte{byte(i)}
- require.Equal(t, expected, byteEncode, "encode byte not match with expected")
- }
-
-	// encoding test for string values; since ABI strings hold utf-8 text,
-	// we use `gobberish` to generate random utf-8 symbols
-	// randomly generate utf-8 strings of lengths 1 to 10, drawing 5 random strings per length,
-	// and check that each encoded ABI string matches the expected bytes
- for length := 1; length <= stringTestCaseCount; length++ {
- for i := 0; i < stringTestCaseSpecLenCount; i++ {
- // generate utf8 strings from `gobberish` at some length
- utf8Str := gobberish.GenerateString(length)
-			// since string is just a type alias of `byte[]`, the encoding must store the byte count up front
- utf8ByteLen := len([]byte(utf8Str))
- lengthBytes := make([]byte, 2)
- binary.BigEndian.PutUint16(lengthBytes, uint16(utf8ByteLen))
- expected := append(lengthBytes, []byte(utf8Str)...)
-
- strEncode, err := stringType.Encode(utf8Str)
- require.NoError(t, err, "string encode fail")
- require.Equal(t, expected, strEncode, "encode string not match with expected")
- }
- }
-
- // encoding test for static bool array, the expected behavior of encoding is to
- // compress multiple bool into a single byte.
- // input: {T, F, F, T, T}, encode expected: {0b10011000}
- staticBoolArrType := makeStaticArrayType(boolType, 5)
- t.Run("static bool array encoding", func(t *testing.T) {
- inputBase := []bool{true, false, false, true, true}
- expected := []byte{
- 0b10011000,
- }
- boolArrEncode, err := staticBoolArrType.Encode(inputBase)
- require.NoError(t, err, "static bool array encoding should not return error")
- require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
- })
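-
-	// For reference, the MSB-first packing rule behind the expected bytes can be
-	// sketched as follows (hypothetical helper, not part of the abi package):
-	//
-	//	packBools := func(vals []bool) []byte {
-	//		out := make([]byte, (len(vals)+7)/8)
-	//		for i, v := range vals {
-	//			if v {
-	//				out[i/8] |= 0x80 >> (i % 8) // element 0 lands in bit 7
-	//			}
-	//		}
-	//		return out
-	//	}
-	//
-	// packBools([]bool{true, false, false, true, true}) == []byte{0b10011000}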
-
- // encoding test for static bool array
- // input: {F, F, F, T, T, F, T, F, T, F, T}, encode expected: {0b00011010, 0b10100000}
- staticBoolArrType = makeStaticArrayType(boolType, 11)
- t.Run("static bool array encoding", func(t *testing.T) {
- inputBase := []bool{false, false, false, true, true, false, true, false, true, false, true}
- expected := []byte{
- 0b00011010, 0b10100000,
- }
- boolArrEncode, err := staticBoolArrType.Encode(inputBase)
- require.NoError(t, err, "static bool array encoding should not return error")
- require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
- })
-
- // encoding test for dynamic bool array
- // input: {F, T, F, T, F, T, F, T, F, T}, encode expected: {0b01010101, 0b01000000}
- dynamicBoolArrayType := makeDynamicArrayType(boolType)
- t.Run("dynamic bool array encoding", func(t *testing.T) {
- inputBase := []bool{false, true, false, true, false, true, false, true, false, true}
- expected := []byte{
- 0x00, 0x0A, 0b01010101, 0b01000000,
- }
- boolArrEncode, err := dynamicBoolArrayType.Encode(inputBase)
- require.NoError(t, err, "dynamic bool array encoding should not return error")
- require.Equal(t, expected, boolArrEncode, "dynamic bool array encode not match expected")
- })
-
- // encoding test for dynamic tuple values
- // input type: (string, bool, bool, bool, bool, string)
- // input value: ("ABC", T, F, T, F, "DEF")
- /*
- encode expected:
- 0x00, 0x05 (first string start at 5th byte)
- 0b10100000 (4 bool tuple element compacted together)
- 0x00, 0x0A (second string start at 10th byte)
- 0x00, 0x03 (first string byte length 3)
- byte('A'), byte('B'), byte('C') (first string encoded bytes)
- 0x00, 0x03 (second string byte length 3)
- byte('D'), byte('E'), byte('F') (second string encoded bytes)
- */
- tupleType, err := TypeOf("(string,bool,bool,bool,bool,string)")
- require.NoError(t, err, "type from string for dynamic tuple type should not return error")
- t.Run("dynamic tuple encoding", func(t *testing.T) {
- inputBase := []interface{}{
- "ABC", true, false, true, false, "DEF",
- }
- expected := []byte{
- 0x00, 0x05, 0b10100000, 0x00, 0x0A,
- 0x00, 0x03, byte('A'), byte('B'), byte('C'),
- 0x00, 0x03, byte('D'), byte('E'), byte('F'),
- }
- stringTupleEncode, err := tupleType.Encode(inputBase)
- require.NoError(t, err, "string tuple encoding should not return error")
- require.Equal(t, expected, stringTupleEncode, "string tuple encoding not match expected")
- })
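-
-	// A worked reading of the head/tail layout above: the head is 2 bytes (offset
-	// of "ABC") + 1 byte (four bools packed together) + 2 bytes (offset of "DEF")
-	// = 5 bytes, so the first tail starts at byte 5 and, being 5 bytes itself
-	// (2-byte length + "ABC"), the second tail starts at byte 10 -- matching the
-	// 0x0005 and 0x000A offsets in the expected bytes.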
-
- // encoding test for tuples with static bool arrays
- // input type: {bool[2], bool[2]}
- // input value: ({T, T}, {T, T})
- /*
- encode expected:
- 0b11000000 (first static bool array)
- 0b11000000 (second static bool array)
- */
- tupleType, err = TypeOf("(bool[2],bool[2])")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("static bool array tuple encoding", func(t *testing.T) {
- expected := []byte{
- 0b11000000,
- 0b11000000,
- }
- actual, err := tupleType.Encode([]interface{}{
- []bool{true, true},
- []bool{true, true},
- })
- require.NoError(t, err, "encode tuple value should not return error")
- require.Equal(t, expected, actual, "encode static bool tuple should be equal")
- })
-
- // encoding test for tuples with static and dynamic bool arrays
- // input type: (bool[2], bool[])
- // input value: ({T, T}, {T, T})
- /*
- encode expected:
- 0b11000000 (first static bool array)
- 0x00, 0x03 (second dynamic bool array starts at 3rd byte)
- 0x00, 0x02 (dynamic bool array length 2)
- 0b11000000 (second static bool array)
- */
- tupleType, err = TypeOf("(bool[2],bool[])")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("static/dynamic bool array tuple encoding", func(t *testing.T) {
- expected := []byte{
- 0b11000000,
- 0x00, 0x03,
- 0x00, 0x02, 0b11000000,
- }
- actual, err := tupleType.Encode([]interface{}{
- []bool{true, true},
- []bool{true, true},
- })
- require.NoError(t, err, "tuple value encoding should not return error")
-		require.Equal(t, expected, actual, "encode static/dynamic bool array tuple does not match expected")
- })
-
- // encoding test for tuples with all dynamic bool arrays
- // input type: (bool[], bool[])
- // input values: ({}, {})
- /*
- encode expected:
- 0x00, 0x04 (first dynamic bool array starts at 4th byte)
- 0x00, 0x06 (second dynamic bool array starts at 6th byte)
- 0x00, 0x00 (first dynamic bool array length 0)
- 0x00, 0x00 (second dynamic bool array length 0)
- */
- tupleType, err = TypeOf("(bool[],bool[])")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("empty dynamic array tuple encoding", func(t *testing.T) {
- expected := []byte{
- 0x00, 0x04, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00,
- }
- actual, err := tupleType.Encode([]interface{}{
- []bool{}, []bool{},
- })
- require.NoError(t, err, "encode empty dynamic array tuple should not return error")
- require.Equal(t, expected, actual, "encode empty dynamic array tuple does not match with expected")
- })
-
- // encoding test for empty tuple
- // input: (), expected encoding: ""
- tupleType, err = TypeOf("()")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("empty tuple encoding", func(t *testing.T) {
- expected := make([]byte, 0)
- actual, err := tupleType.Encode([]interface{}{})
- require.NoError(t, err, "encode empty tuple should not return error")
-		require.Equal(t, expected, actual, "empty tuple encoding does not match expected")
- })
-}
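-
-// A minimal sketch of the public round trip these tests exercise (error
-// handling elided for brevity):
-//
-//	typ, _ := TypeOf("(uint64,string)")
-//	enc, _ := typ.Encode([]interface{}{uint64(1), "hi"})
-//	val, _ := typ.Decode(enc) // []interface{}{uint64(1), "hi"}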
-
-func TestDecodeValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // decoding test for uint, iterating through all valid uint bitSize
- // randomly take 1000 tests on each valid bitSize
- // generate bytes from random uint values and decode bytes with additional type information
- for intSize := uintBegin; intSize <= uintEnd; intSize += uintStepLength {
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(intSize))
- uintType, err := makeUintType(intSize)
- require.NoError(t, err, "make uint type failure")
- for i := 0; i < uintRandomTestPoints; i++ {
- randBig, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- var expected interface{}
- if intSize <= 64 && intSize > 32 {
- expected = randBig.Uint64()
- } else if intSize <= 32 && intSize > 16 {
- expected = uint32(randBig.Uint64())
- } else if intSize == 16 {
- expected = uint16(randBig.Uint64())
- } else if intSize == 8 {
- expected = uint8(randBig.Uint64())
- } else {
- expected = randBig
- }
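-			// the chain above mirrors castBigIntToNearestPrimitive in abi_json.go:
-			// Decode is expected to return the narrowest Go type that fits bitSize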
-
- encodedUint, err := uintType.Encode(expected)
- require.NoError(t, err, "uint encode fail")
-
- actual, err := uintType.Decode(encodedUint)
- require.NoError(t, err, "decoding uint should not return error")
-			require.Equal(t, expected, actual, "decoded uint does not match expected value")
- }
- }
-
-	// decoding test for ufixed, iterating through all valid ufixed bitSize and precision combinations
-	// randomly take 20 tests on each valid setting
-	// generate ufixed bytes and decode them back with the additional type information
- for size := uintBegin; size <= uintEnd; size += uintStepLength {
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
- for precision := 1; precision <= ufixedPrecision; precision++ {
- ufixedType, err := makeUfixedType(size, precision)
- require.NoError(t, err, "make ufixed type failure")
- for i := 0; i < ufixedRandomTestPoints; i++ {
- randBig, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- var expected interface{}
- if size <= 64 && size > 32 {
- expected = randBig.Uint64()
- } else if size <= 32 && size > 16 {
- expected = uint32(randBig.Uint64())
- } else if size == 16 {
- expected = uint16(randBig.Uint64())
- } else if size == 8 {
- expected = uint8(randBig.Uint64())
- } else {
- expected = randBig
- }
-
-				encodedUfixed, err := ufixedType.Encode(expected)
-				require.NoError(t, err, "ufixed encode fail")
-
- actual, err := ufixedType.Decode(encodedUfixed)
- require.NoError(t, err, "decoding ufixed should not return error")
-				require.Equal(t, expected, actual, "decoded ufixed does not match expected value")
- }
- }
- }
-
-	// decoding test for address, randomly take 300 tests
-	// address is a type alias of byte[32]; we generate address values from random 256-bit big ints,
-	// then decode the encoding of each expected value and check that they match
- upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- for i := 0; i < addressTestCaseCount; i++ {
- randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- expected := make([]byte, addressByteSize)
- randomAddrInt.FillBytes(expected)
-
- actual, err := addressType.Decode(expected)
- require.NoError(t, err, "decoding address should not return error")
- require.Equal(t, expected, actual, "decode addr not match with expected")
- }
-
- // bool value decoding test
- for i := 0; i < 2; i++ {
- boolEncode, err := boolType.Encode(i == 1)
- require.NoError(t, err, "bool encode fail")
- actual, err := boolType.Decode(boolEncode)
- require.NoError(t, err, "decoding bool should not return error")
- require.Equal(t, i == 1, actual, "decode bool not match with expected")
- }
-
- // byte value decoding test, iterating through 256 valid byte value
- for i := 0; i < byteTestCaseCount; i++ {
- byteEncode, err := byteType.Encode(byte(i))
- require.NoError(t, err, "byte encode fail")
- actual, err := byteType.Decode(byteEncode)
- require.NoError(t, err, "decoding byte should not return error")
- require.Equal(t, byte(i), actual, "decode byte not match with expected")
- }
-
- // string value decoding test, test from utf string length 1 to 10
- // randomly take 5 utf-8 strings to make ABI string values
- // decode the encoded expected value and check if they match
- for length := 1; length <= stringTestCaseCount; length++ {
- for i := 0; i < stringTestCaseSpecLenCount; i++ {
- expected := gobberish.GenerateString(length)
- strEncode, err := stringType.Encode(expected)
- require.NoError(t, err, "string encode fail")
- actual, err := stringType.Decode(strEncode)
- require.NoError(t, err, "decoding string should not return error")
-			require.Equal(t, expected, actual, "decoded string does not match expected")
- }
- }
-
- // decoding test for static bool array
- // expected value: bool[5]: {T, F, F, T, T}
- // input: 0b10011000
- t.Run("static bool array decode", func(t *testing.T) {
- staticBoolArrT, err := TypeOf("bool[5]")
- require.NoError(t, err, "make static bool array type failure")
- expected := []interface{}{true, false, false, true, true}
- actual, err := staticBoolArrT.Decode([]byte{0b10011000})
- require.NoError(t, err, "decoding static bool array should not return error")
-		require.Equal(t, expected, actual, "static bool array decode does not match expected")
- })
-
- // decoding test for static bool array
-	// expected value: bool[11]: {F, F, F, T, T, F, T, F, T, F, T}
- // input: 0b00011010, 0b10100000
- t.Run("static bool array decode", func(t *testing.T) {
- staticBoolArrT, err := TypeOf("bool[11]")
- require.NoError(t, err, "make static bool array type failure")
- expected := []interface{}{false, false, false, true, true, false, true, false, true, false, true}
- actual, err := staticBoolArrT.Decode([]byte{0b00011010, 0b10100000})
- require.NoError(t, err, "decoding static bool array should not return error")
-		require.Equal(t, expected, actual, "static bool array decode does not match expected")
- })
-
- // decoding test for static uint array
- // expected input: uint64[8]: {1, 2, 3, 4, 5, 6, 7, 8}
- /*
- input: 0, 0, 0, 0, 0, 0, 0, 1 (encoding for uint64 1)
- 0, 0, 0, 0, 0, 0, 0, 2 (encoding for uint64 2)
- 0, 0, 0, 0, 0, 0, 0, 3 (encoding for uint64 3)
- 0, 0, 0, 0, 0, 0, 0, 4 (encoding for uint64 4)
- 0, 0, 0, 0, 0, 0, 0, 5 (encoding for uint64 5)
- 0, 0, 0, 0, 0, 0, 0, 6 (encoding for uint64 6)
- 0, 0, 0, 0, 0, 0, 0, 7 (encoding for uint64 7)
- 0, 0, 0, 0, 0, 0, 0, 8 (encoding for uint64 8)
- */
- t.Run("static uint array decode", func(t *testing.T) {
- staticUintArrT, err := TypeOf("uint64[8]")
- require.NoError(t, err, "make static uint array type failure")
- expected := []interface{}{
- uint64(1), uint64(2),
- uint64(3), uint64(4),
- uint64(5), uint64(6),
- uint64(7), uint64(8),
- }
- arrayEncoded, err := staticUintArrT.Encode(expected)
- require.NoError(t, err, "uint64 static array encode should not return error")
- actual, err := staticUintArrT.Decode(arrayEncoded)
- require.NoError(t, err, "uint64 static array decode should not return error")
-		require.Equal(t, expected, actual, "uint64 static array decode does not match expected value")
- })
-
- // decoding test for dynamic bool array
- // expected value: bool[]: {F, T, F, T, F, T, F, T, F, T}
- /*
- input bytes: 0x00, 0x0A (dynamic bool array length 10)
- 0b01010101, 0b01000000 (dynamic bool array encoding)
- */
- t.Run("dynamic bool array decode", func(t *testing.T) {
- dynamicBoolArrT, err := TypeOf("bool[]")
- require.NoError(t, err, "make dynamic bool array type failure")
- expected := []interface{}{false, true, false, true, false, true, false, true, false, true}
- inputEncoded := []byte{
- 0x00, 0x0A, 0b01010101, 0b01000000,
- }
- actual, err := dynamicBoolArrT.Decode(inputEncoded)
- require.NoError(t, err, "decode dynamic array should not return error")
-		require.Equal(t, expected, actual, "decoded dynamic array does not match expected")
- })
-
- // decoding test for dynamic tuple values
- // expected value type: (string, bool, bool, bool, bool, string)
- // expected value: ("ABC", T, F, T, F, "DEF")
- /*
- input bytes:
- 0x00, 0x05 (first string start at 5th byte)
- 0b10100000 (4 bool tuple element compacted together)
- 0x00, 0x0A (second string start at 10th byte)
- 0x00, 0x03 (first string byte length 3)
- byte('A'), byte('B'), byte('C') (first string encoded bytes)
- 0x00, 0x03 (second string byte length 3)
- byte('D'), byte('E'), byte('F') (second string encoded bytes)
- */
- t.Run("dynamic tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
- require.NoError(t, err, "make tuple type failure")
- inputEncode := []byte{
- 0x00, 0x05, 0b10100000, 0x00, 0x0A,
- 0x00, 0x03, byte('A'), byte('B'), byte('C'),
- 0x00, 0x03, byte('D'), byte('E'), byte('F'),
- }
- expected := []interface{}{
- "ABC", true, false, true, false, "DEF",
- }
- actual, err := tupleT.Decode(inputEncode)
- require.NoError(t, err, "decoding dynamic tuple should not return error")
- require.Equal(t, expected, actual, "dynamic tuple not match with expected")
- })
-
- // decoding test for tuple with static bool array
- // expected type: (bool[2], bool[2])
- // expected value: ({T, T}, {T, T})
- /*
- input bytes:
- 0b11000000 (first static bool array)
- 0b11000000 (second static bool array)
- */
- t.Run("static bool array tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(bool[2],bool[2])")
- require.NoError(t, err, "make tuple type failure")
- expected := []interface{}{
- []interface{}{true, true},
- []interface{}{true, true},
- }
- encodedInput := []byte{
- 0b11000000,
- 0b11000000,
- }
- actual, err := tupleT.Decode(encodedInput)
- require.NoError(t, err, "decode tuple value should not return error")
-		require.Equal(t, expected, actual, "decoded tuple value does not match expected")
- })
-
- // decoding test for tuple with static and dynamic bool array
- // expected type: (bool[2], bool[])
- // expected value: ({T, T}, {T, T})
- /*
- input bytes:
- 0b11000000 (first static bool array)
- 0x00, 0x03 (second dynamic bool array starts at 3rd byte)
- 0x00, 0x02 (dynamic bool array length 2)
- 0b11000000 (second static bool array)
- */
- t.Run("static/dynamic bool array tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(bool[2],bool[])")
- require.NoError(t, err, "make tuple type failure")
- expected := []interface{}{
- []interface{}{true, true},
- []interface{}{true, true},
- }
- encodedInput := []byte{
- 0b11000000,
- 0x00, 0x03,
- 0x00, 0x02, 0b11000000,
- }
- actual, err := tupleT.Decode(encodedInput)
- require.NoError(t, err, "decode tuple for static/dynamic bool array should not return error")
-		require.Equal(t, expected, actual, "decoded tuple value does not match expected")
- })
-
- // decoding test for tuple with all dynamic bool array
-	// expected type: (bool[], bool[])
- // expected value: ({}, {})
- /*
- input bytes:
- 0x00, 0x04 (first dynamic bool array starts at 4th byte)
- 0x00, 0x06 (second dynamic bool array starts at 6th byte)
- 0x00, 0x00 (first dynamic bool array length 0)
- 0x00, 0x00 (second dynamic bool array length 0)
- */
- t.Run("empty dynamic array tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(bool[],bool[])")
- require.NoError(t, err, "make tuple type failure")
- expected := []interface{}{
- []interface{}{}, []interface{}{},
- }
- encodedInput := []byte{
- 0x00, 0x04, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00,
- }
- actual, err := tupleT.Decode(encodedInput)
- require.NoError(t, err, "decode tuple for empty dynamic array should not return error")
-		require.Equal(t, expected, actual, "decoded tuple value does not match expected")
- })
-
- // decoding test for empty tuple
- // expected value: ()
- // byte input: ""
- t.Run("empty tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("()")
- require.NoError(t, err, "make empty tuple type should not return error")
- actual, err := tupleT.Decode([]byte{})
- require.NoError(t, err, "decode empty tuple should not return error")
-		require.Equal(t, []interface{}{}, actual, "empty tuple decoding does not match expected")
- })
-}
-
-func TestDecodeInvalid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // decoding test for *corrupted* static bool array
- // expected 9 elements for static bool array
- // encoded bytes have only 8 bool values
-	// should return an error
- t.Run("corrupted static bool array decode", func(t *testing.T) {
- inputBase := []byte{0b11111111}
- arrayType := makeStaticArrayType(boolType, 9)
- _, err := arrayType.Decode(inputBase)
- require.Error(t, err, "decoding corrupted static bool array should return error")
- })
-
- // decoding test for *corrupted* static bool array
- // expected 8 elements for static bool array
-	// encoded bytes have one extra byte (0b00000000)
-	// should return an error
- t.Run("corrupted static bool array decode", func(t *testing.T) {
- inputBase := []byte{0b01001011, 0b00000000}
- arrayType := makeStaticArrayType(boolType, 8)
- _, err := arrayType.Decode(inputBase)
- require.Error(t, err, "decoding corrupted static bool array should return error")
- })
-
- // decoding test for *corrupted* static uint array
- // expected 8 uint elements in static uint64[8] array
-	// encoded bytes provide only 7 uint64 encodings
-	// should return an error
-	t.Run("corrupted static uint array decode", func(t *testing.T) {
- inputBase := []byte{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 2,
- 0, 0, 0, 0, 0, 0, 0, 3,
- 0, 0, 0, 0, 0, 0, 0, 4,
- 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 6,
- }
- uintTArray, err := TypeOf("uint64[8]")
- require.NoError(t, err, "make uint64 static array type should not return error")
- _, err = uintTArray.Decode(inputBase)
- require.Error(t, err, "corrupted uint64 static array decode should return error")
- })
-
- // decoding test for *corrupted* static uint array
- // expected 7 uint elements in static uint64[7] array
-	// encoded bytes provide 8 uint64 encodings (one more uint64: 7)
-	// should return an error
-	t.Run("corrupted static uint array decode", func(t *testing.T) {
- inputBase := []byte{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 2,
- 0, 0, 0, 0, 0, 0, 0, 3,
- 0, 0, 0, 0, 0, 0, 0, 4,
- 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 6,
- 0, 0, 0, 0, 0, 0, 0, 7,
- }
- uintTArray, err := TypeOf("uint64[7]")
- require.NoError(t, err, "make uint64 static array type should not return error")
- _, err = uintTArray.Decode(inputBase)
- require.Error(t, err, "corrupted uint64 static array decode should return error")
- })
-
- // decoding test for *corrupted* dynamic bool array
- // expected 0x0A (10) bool elements in encoding head
- // encoded bytes provide only 8 bool elements
-	// should return an error
- t.Run("corrupted dynamic bool array decode", func(t *testing.T) {
- inputBase := []byte{
- 0x00, 0x0A, 0b10101010,
- }
- dynamicT := makeDynamicArrayType(boolType)
- _, err := dynamicT.Decode(inputBase)
- require.Error(t, err, "decode corrupted dynamic array should return error")
- })
-
- // decoding test for *corrupted* dynamic bool array
- // expected 0x07 (7) bool elements in encoding head
-	// encoded bytes have one extra byte (0b00000000)
-	// should return an error
- t.Run("corrupted dynamic bool array decode", func(t *testing.T) {
- inputBase := []byte{
- 0x00, 0x07, 0b10101010, 0b00000000,
- }
- dynamicT := makeDynamicArrayType(boolType)
- _, err := dynamicT.Decode(inputBase)
- require.Error(t, err, "decode corrupted dynamic array should return error")
- })
-
- // decoding test for *corrupted* dynamic tuple value
- // expected type: (string, bool, bool, bool, bool, string)
- // expected value: ("ABC", T, F, T, F, "DEF")
- /*
- corrupted bytes:
- 0x00, 0x04 (corrupted: first string start at 4th byte, should be 5th)
- 0b10100000 (4 bool tuple element compacted together)
- 0x00, 0x0A (second string start at 10th byte)
- 0x00, 0x03 (first string byte length 3)
- byte('A'), byte('B'), byte('C') (first string encoded bytes)
- 0x00, 0x03 (second string byte length 3)
- byte('D'), byte('E'), byte('F') (second string encoded bytes)
- */
-	// as a result, the first string would be read starting at byte 4, making its
-	// length bytes 0x0A, 0x00 (i.e. length 2560), which exceeds the segment
-	// allocated to it: 0x0A, 0x00, 0x03, byte('A'), byte('B'), byte('C')
-	// so decoding should return an error
- t.Run("corrupted dynamic tuple decoding", func(t *testing.T) {
- inputEncode := []byte{
- 0x00, 0x04, 0b10100000, 0x00, 0x0A,
- 0x00, 0x03, byte('A'), byte('B'), byte('C'),
- 0x00, 0x03, byte('D'), byte('E'), byte('F'),
- }
- tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(inputEncode)
- require.Error(t, err, "corrupted decoding dynamic tuple should return error")
- })
-
- // decoding test for *corrupted* tuple with static bool arrays
- // expected type: (bool[2], bool[2])
- // expected value: ({T, T}, {T, T})
- /*
- corrupted bytes test case 0:
- 0b11000000
- 0b11000000
- 0b00000000 <- corrupted byte, 1 byte more
-
-	   corrupted bytes test case 1:
- 0b11000000
- <- corrupted byte, 1 byte missing
- */
- t.Run("corrupted static bool array tuple decoding", func(t *testing.T) {
- expectedType, err := TypeOf("(bool[2],bool[2])")
- require.NoError(t, err, "make tuple type failure")
- encodedInput0 := []byte{
- 0b11000000,
- 0b11000000,
- 0b00000000,
- }
- _, err = expectedType.Decode(encodedInput0)
- require.Error(t, err, "decode corrupted tuple value should return error")
-
- encodedInput1 := []byte{
- 0b11000000,
- }
- _, err = expectedType.Decode(encodedInput1)
- require.Error(t, err, "decode corrupted tuple value should return error")
- })
-
- // decoding test for *corrupted* tuple with static and dynamic bool array
- // expected type: (bool[2], bool[])
- // expected value: ({T, T}, {T, T})
- /*
- corrupted bytes:
- 0b11000000 (first static bool array)
- 0x03 <- corrupted, missing 0x00 byte (second dynamic bool array starts at 3rd byte)
- 0x00, 0x02 (dynamic bool array length 2)
- 0b11000000 (second static bool array)
- */
- t.Run("corrupted static/dynamic bool array tuple decoding", func(t *testing.T) {
- encodedInput := []byte{
- 0b11000000,
- 0x03,
- 0x00, 0x02, 0b11000000,
- }
- tupleT, err := TypeOf("(bool[2],bool[])")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(encodedInput)
- require.Error(t, err, "decode corrupted tuple for static/dynamic bool array should return error")
- })
-
- // decoding test for *corrupted* tuple with dynamic bool array
- // expected type: (bool[], bool[])
- // expected value: ({}, {})
- /*
- corrupted bytes:
- 0x00, 0x04 (first dynamic bool array starts at 4th byte)
- 0x00, 0x07 <- corrupted, should be 0x06 (second dynamic bool array starts at 6th byte)
- 0x00, 0x00 (first dynamic bool array length 0)
- 0x00, 0x00 (second dynamic bool array length 0)
-
-	   the first dynamic array starts at byte 4, so its segment would be
-	   0x00, 0x00, 0x00 (one 0x00 byte too many)
-	   the second dynamic array starts at byte 7, leaving only a single 0x00 byte,
-	   which is too short to hold its 2-byte length prefix
- */
- // should return error
- t.Run("corrupted empty dynamic array tuple decoding", func(t *testing.T) {
- encodedInput := []byte{
- 0x00, 0x04, 0x00, 0x07,
- 0x00, 0x00, 0x00, 0x00,
- }
- tupleT, err := TypeOf("(bool[],bool[])")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(encodedInput)
- require.Error(t, err, "decode corrupted tuple for empty dynamic array should return error")
- })
-
- // decoding test for *corrupted* empty tuple
- // expected value: ()
- // corrupted input: 0xFF, should be empty byte
- // should return error
- t.Run("corrupted empty tuple decoding", func(t *testing.T) {
- encodedInput := []byte{0xFF}
- tupleT, err := TypeOf("()")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(encodedInput)
- require.Error(t, err, "decode corrupted empty tuple should return error")
- })
-}
-
-type testUnit struct {
- serializedType string
- value interface{}
-}
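-
-// For example, testUnit{serializedType: "uint64", value: uint64(17)} describes
-// one round-trip case: the type string is re-parsed with TypeOf, and the value
-// is pushed through Encode/Decode and MarshalToJSON/UnmarshalFromJSON below.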
-
-func categorySelfRoundTripTest(t *testing.T, category []testUnit) {
- for _, testObj := range category {
- abiType, err := TypeOf(testObj.serializedType)
- require.NoError(t, err, "failure to deserialize type: "+testObj.serializedType)
- encodedValue, err := abiType.Encode(testObj.value)
- require.NoError(t, err,
- "failure to encode value %#v over type %s", testObj.value, testObj.serializedType,
- )
- actual, err := abiType.Decode(encodedValue)
- require.NoError(t, err,
- "failure to decode value %#v for type %s", encodedValue, testObj.serializedType,
- )
- require.Equal(t, testObj.value, actual,
- "decoded value %#v not equal to expected value %#v", actual, testObj.value,
- )
- jsonEncodedValue, err := abiType.MarshalToJSON(testObj.value)
- require.NoError(t, err,
- "failure to encode value %#v to JSON type", testObj.value,
- )
- jsonActual, err := abiType.UnmarshalFromJSON(jsonEncodedValue)
- require.NoError(t, err,
- "failure to decode JSON value %s back for type %s",
- string(jsonEncodedValue), testObj.serializedType,
- )
- require.Equal(t, testObj.value, jsonActual,
- "decode JSON value %s not equal to expected %s", jsonActual, testObj.value,
- )
- }
-}
-
-func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
- (*pool)[Uint] = make([]testUnit, uintTestCaseCount*uintEnd/uintStepLength)
- (*pool)[Ufixed] = make([]testUnit, ufixedPrecision*uintEnd/uintStepLength)
-
- uintIndex := 0
- ufixedIndex := 0
-
- for bitSize := uintBegin; bitSize <= uintEnd; bitSize += uintStepLength {
- max := new(big.Int).Lsh(big.NewInt(1), uint(bitSize))
-
- uintT, err := makeUintType(bitSize)
- require.NoError(t, err, "make uint type failure")
- uintTstr := uintT.String()
-
- for j := 0; j < uintTestCaseCount; j++ {
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate random uint, should be no error")
-
- narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
- require.NoError(t, err, "cast random uint to nearest primitive failure")
-
- (*pool)[Uint][uintIndex] = testUnit{serializedType: uintTstr, value: narrowest}
- uintIndex++
- }
-
- for precision := 1; precision <= ufixedPrecision; precision++ {
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate random ufixed, should be no error")
-
- narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
- require.NoError(t, err, "cast random uint to nearest primitive failure")
-
- ufixedT, err := makeUfixedType(bitSize, precision)
- require.NoError(t, err, "make ufixed type failure")
- ufixedTstr := ufixedT.String()
- (*pool)[Ufixed][ufixedIndex] = testUnit{serializedType: ufixedTstr, value: narrowest}
- ufixedIndex++
- }
- }
- categorySelfRoundTripTest(t, (*pool)[Uint])
- categorySelfRoundTripTest(t, (*pool)[Ufixed])
-
- (*pool)[Byte] = make([]testUnit, byteTestCaseCount)
- for i := 0; i < byteTestCaseCount; i++ {
- (*pool)[Byte][i] = testUnit{serializedType: byteType.String(), value: byte(i)}
- }
- categorySelfRoundTripTest(t, (*pool)[Byte])
-
- (*pool)[Bool] = make([]testUnit, boolTestCaseCount)
- (*pool)[Bool][0] = testUnit{serializedType: boolType.String(), value: false}
- (*pool)[Bool][1] = testUnit{serializedType: boolType.String(), value: true}
- categorySelfRoundTripTest(t, (*pool)[Bool])
-
- maxAddress := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- (*pool)[Address] = make([]testUnit, addressTestCaseCount)
- for i := 0; i < addressTestCaseCount; i++ {
- randAddrVal, err := rand.Int(rand.Reader, maxAddress)
- require.NoError(t, err, "generate random value for address, should be no error")
- addrBytes := make([]byte, addressByteSize)
- randAddrVal.FillBytes(addrBytes)
- (*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
- }
- categorySelfRoundTripTest(t, (*pool)[Address])
-
- (*pool)[String] = make([]testUnit, stringTestCaseCount*stringTestCaseSpecLenCount)
- stringIndex := 0
- for length := 1; length <= stringTestCaseCount; length++ {
- for i := 0; i < stringTestCaseSpecLenCount; i++ {
- (*pool)[String][stringIndex] = testUnit{
- serializedType: stringType.String(),
- value: gobberish.GenerateString(length),
- }
- stringIndex++
- }
- }
- categorySelfRoundTripTest(t, (*pool)[String])
-}
-
-func takeSomeFromCategoryAndGenerateArray(
- t *testing.T, abiT BaseType, srtIndex int, takeNum uint16, pool *map[BaseType][]testUnit) {
-
- tempArray := make([]interface{}, takeNum)
- for i := 0; i < int(takeNum); i++ {
- index := srtIndex + i
- if index >= len((*pool)[abiT]) {
- index = srtIndex
- }
- tempArray[i] = (*pool)[abiT][index].value
- }
- tempT, err := TypeOf((*pool)[abiT][srtIndex].serializedType)
-	require.NoError(t, err, "type in test unit cannot be deserialized")
- (*pool)[ArrayStatic] = append((*pool)[ArrayStatic], testUnit{
- serializedType: makeStaticArrayType(tempT, takeNum).String(),
- value: tempArray,
- })
- (*pool)[ArrayDynamic] = append((*pool)[ArrayDynamic], testUnit{
- serializedType: makeDynamicArrayType(tempT).String(),
- value: tempArray,
- })
-}
-
-func addArrayRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
- for intIndex := 0; intIndex < len((*pool)[Uint]); intIndex += uintTestCaseCount {
- takeSomeFromCategoryAndGenerateArray(t, Uint, intIndex, takeNum, pool)
- }
- takeSomeFromCategoryAndGenerateArray(t, Byte, 0, takeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, Address, 0, takeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, String, 0, takeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, Bool, 0, takeNum, pool)
-
- categorySelfRoundTripTest(t, (*pool)[ArrayStatic])
- categorySelfRoundTripTest(t, (*pool)[ArrayDynamic])
-}
-
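-// addTupleRandomValues draws element types from BaseType indices 0..slotRange
-// (inclusive), so the pools for those categories must already be populated by
-// the callers in TestRandomABIEncodeDecodeRoundTrip.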
-func addTupleRandomValues(t *testing.T, slotRange BaseType, pool *map[BaseType][]testUnit) {
- for i := 0; i < tupleTestCaseCount; i++ {
- tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(tupleMaxLength))
- require.NoError(t, err, "generate random tuple length should not return error")
- tupleLen := tupleLenBig.Int64() + 1
- testUnits := make([]testUnit, tupleLen)
- for index := 0; index < int(tupleLen); index++ {
- tupleTypeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(slotRange)+1))
- require.NoError(t, err, "generate random tuple element type index should not return error")
- tupleTypeIndex := BaseType(tupleTypeIndexBig.Int64())
- tupleElemChoiceRange := len((*pool)[tupleTypeIndex])
-
- tupleElemRangeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(tupleElemChoiceRange)))
- require.NoError(t, err, "generate random tuple element index in test pool should not return error")
- tupleElemRangeIndex := tupleElemRangeIndexBig.Int64()
- tupleElem := (*pool)[tupleTypeIndex][tupleElemRangeIndex]
- testUnits[index] = tupleElem
- }
- elemValues := make([]interface{}, tupleLen)
- elemTypes := make([]Type, tupleLen)
- for index := 0; index < int(tupleLen); index++ {
- elemValues[index] = testUnits[index].value
- abiT, err := TypeOf(testUnits[index].serializedType)
- require.NoError(t, err, "deserialize type failure for tuple elements")
- elemTypes[index] = abiT
- }
- tupleT, err := MakeTupleType(elemTypes)
- require.NoError(t, err, "make tuple type failure")
- (*pool)[Tuple] = append((*pool)[Tuple], testUnit{
- serializedType: tupleT.String(),
- value: elemValues,
- })
- }
-}
-
-func TestRandomABIEncodeDecodeRoundTrip(t *testing.T) {
- partitiontest.PartitionTest(t)
- testValuePool := make(map[BaseType][]testUnit)
- addPrimitiveRandomValues(t, &testValuePool)
- addArrayRandomValues(t, &testValuePool)
- addTupleRandomValues(t, String, &testValuePool)
- addTupleRandomValues(t, Tuple, &testValuePool)
- categorySelfRoundTripTest(t, testValuePool[Tuple])
-}
-
-func TestParseArgJSONtoByteSlice(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- makeRepeatSlice := func(size int, value string) []string {
- slice := make([]string, size)
- for i := range slice {
- slice[i] = value
- }
- return slice
- }
-
- tests := []struct {
- argTypes []string
- jsonArgs []string
- expectedAppArgs [][]byte
- }{
- {
- argTypes: []string{},
- jsonArgs: []string{},
- expectedAppArgs: [][]byte{},
- },
- {
- argTypes: []string{"uint8"},
- jsonArgs: []string{"100"},
- expectedAppArgs: [][]byte{{100}},
- },
- {
- argTypes: []string{"uint8", "uint16"},
- jsonArgs: []string{"100", "65535"},
- expectedAppArgs: [][]byte{{100}, {255, 255}},
- },
- {
- argTypes: makeRepeatSlice(15, "string"),
- jsonArgs: []string{
- `"a"`,
- `"b"`,
- `"c"`,
- `"d"`,
- `"e"`,
- `"f"`,
- `"g"`,
- `"h"`,
- `"i"`,
- `"j"`,
- `"k"`,
- `"l"`,
- `"m"`,
- `"n"`,
- `"o"`,
- },
- expectedAppArgs: [][]byte{
- {00, 01, 97},
- {00, 01, 98},
- {00, 01, 99},
- {00, 01, 100},
- {00, 01, 101},
- {00, 01, 102},
- {00, 01, 103},
- {00, 01, 104},
- {00, 01, 105},
- {00, 01, 106},
- {00, 01, 107},
- {00, 01, 108},
- {00, 01, 109},
- {00, 01, 110},
- {00, 01, 111},
- },
- },
- {
- argTypes: makeRepeatSlice(16, "string"),
- jsonArgs: []string{
- `"a"`,
- `"b"`,
- `"c"`,
- `"d"`,
- `"e"`,
- `"f"`,
- `"g"`,
- `"h"`,
- `"i"`,
- `"j"`,
- `"k"`,
- `"l"`,
- `"m"`,
- `"n"`,
- `"o"`,
- `"p"`,
- },
- expectedAppArgs: [][]byte{
- {00, 01, 97},
- {00, 01, 98},
- {00, 01, 99},
- {00, 01, 100},
- {00, 01, 101},
- {00, 01, 102},
- {00, 01, 103},
- {00, 01, 104},
- {00, 01, 105},
- {00, 01, 106},
- {00, 01, 107},
- {00, 01, 108},
- {00, 01, 109},
- {00, 01, 110},
- {00, 04, 00, 07, 00, 01, 111, 00, 01, 112},
- },
- },
- }
-
- for i, test := range tests {
- t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
- applicationArgs := [][]byte{}
- err := ParseArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs)
- require.NoError(t, err)
- require.Equal(t, test.expectedAppArgs, applicationArgs)
- })
- }
-}
-
-func TestParseMethodSignature(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- tests := []struct {
- signature string
- name string
- argTypes []string
- returnType string
- }{
- {
- signature: "add(uint8,uint16,pay,account,txn)uint32",
- name: "add",
- argTypes: []string{"uint8", "uint16", "pay", "account", "txn"},
- returnType: "uint32",
- },
- {
- signature: "nothing()void",
- name: "nothing",
- argTypes: []string{},
- returnType: "void",
- },
- {
- signature: "tupleArgs((uint8,uint128),account,(string,(bool,bool)))bool",
- name: "tupleArgs",
- argTypes: []string{"(uint8,uint128)", "account", "(string,(bool,bool))"},
- returnType: "bool",
- },
- {
- signature: "tupleReturn(uint64)(bool,bool,bool)",
- name: "tupleReturn",
- argTypes: []string{"uint64"},
- returnType: "(bool,bool,bool)",
- },
- {
- signature: "tupleArgsAndReturn((uint8,uint128),account,(string,(bool,bool)))(bool,bool,bool)",
- name: "tupleArgsAndReturn",
- argTypes: []string{"(uint8,uint128)", "account", "(string,(bool,bool))"},
- returnType: "(bool,bool,bool)",
- },
- }
-
- for _, test := range tests {
- t.Run(test.signature, func(t *testing.T) {
- name, argTypes, returnType, err := ParseMethodSignature(test.signature)
- require.NoError(t, err)
- require.Equal(t, test.name, name)
- require.Equal(t, test.argTypes, argTypes)
- require.Equal(t, test.returnType, returnType)
- })
- }
-}
-
-func TestInferToSlice(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var emptySlice []int
- tests := []struct {
- toBeInferred interface{}
- length int
- }{
- {
- toBeInferred: []int{},
- length: 0,
- },
- {
- toBeInferred: make([]int, 0),
- length: 0,
- },
- {
- toBeInferred: emptySlice,
- length: 0,
- },
- {
- toBeInferred: [0]int{},
- length: 0,
- },
- {
- toBeInferred: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
- length: 32,
- },
- {
- toBeInferred: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
- length: 32,
- },
- }
-
- for i, test := range tests {
- inferredSlice, err := inferToSlice(test.toBeInferred)
- require.NoError(t, err, "inferToSlice on testcase %d failed to successfully infer %v", i, test.toBeInferred)
- require.Equal(t, test.length, len(inferredSlice), "inferToSlice on testcase %d inferred different length, expected %d", i, test.length)
- }
-
-	// one more testcase: a totally nil value (with no type information) is bad, and should not pass the test
- _, err := inferToSlice(nil)
- require.EqualError(
- t, err,
- "cannot infer an interface value as a slice of interface element",
- "inferToSlice should return type inference error when passed in nil with unexpected Kind")
-
-	// one more testcase: a typed nil (here *uint64) is also bad, and should not pass the test
- var nilPt *uint64 = nil
- _, err = inferToSlice(nilPt)
- require.EqualError(
- t, err,
- "cannot infer an interface value as a slice of interface element",
- "inferToSlice should return type inference error when passing argument type other than slice or array")
-}
diff --git a/data/abi/abi_json.go b/data/abi/abi_json.go
deleted file mode 100644
index a71823f0c..000000000
--- a/data/abi/abi_json.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "bytes"
- "crypto/sha512"
- "encoding/base32"
- "encoding/json"
- "fmt"
- "math/big"
-)
-
-// NOTE: discussion about go-algorand-sdk
-// https://github.com/algorand/go-algorand/pull/3375#issuecomment-1007536841
-
-var base32Encoder = base32.StdEncoding.WithPadding(base32.NoPadding)
-
-func addressCheckSum(addressBytes []byte) ([]byte, error) {
- if len(addressBytes) != addressByteSize {
- return nil, fmt.Errorf("address bytes should be of length 32")
- }
- hashed := sha512.Sum512_256(addressBytes[:])
- return hashed[addressByteSize-checksumByteSize:], nil
-}
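-
-// For context: an Algorand address string is the base32 (no padding) encoding of
-// the 32-byte public key followed by this 4-byte checksum; 36 bytes yield 58 characters.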
-
-func castBigIntToNearestPrimitive(num *big.Int, bitSize uint16) (interface{}, error) {
- if num.BitLen() > int(bitSize) {
- return nil, fmt.Errorf("cast big int to nearest primitive failure: %v >= 2^%d", num, bitSize)
- } else if num.Sign() < 0 {
-		return nil, fmt.Errorf("cannot cast big int to nearest primitive: %v < 0", num)
- }
-
- switch bitSize / 8 {
- case 1:
- return uint8(num.Uint64()), nil
- case 2:
- return uint16(num.Uint64()), nil
- case 3, 4:
- return uint32(num.Uint64()), nil
- case 5, 6, 7, 8:
- return num.Uint64(), nil
- default:
- return num, nil
- }
-}
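-
-// For example, castBigIntToNearestPrimitive(big.NewInt(300), 16) yields
-// uint16(300), while bit sizes 24 and 32 (bitSize/8 of 3 or 4) map to uint32.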
-
-// MarshalToJSON converts a Go value into its JSON representation, following the ABI type t
-func (t Type) MarshalToJSON(value interface{}) ([]byte, error) {
- switch t.abiTypeID {
- case Uint:
- bytesUint, err := encodeInt(value, t.bitSize)
- if err != nil {
- return nil, err
- }
- return new(big.Int).SetBytes(bytesUint).MarshalJSON()
- case Ufixed:
- denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
- encodedUint, err := encodeInt(value, t.bitSize)
- if err != nil {
- return nil, err
- }
- return []byte(new(big.Rat).SetFrac(new(big.Int).SetBytes(encodedUint), denom).FloatString(int(t.precision))), nil
- case Bool:
- boolValue, ok := value.(bool)
- if !ok {
- return nil, fmt.Errorf("cannot infer to bool for marshal to JSON")
- }
- return json.Marshal(boolValue)
- case Byte:
- byteValue, ok := value.(byte)
- if !ok {
- return nil, fmt.Errorf("cannot infer to byte for marshal to JSON")
- }
- return json.Marshal(byteValue)
- case Address:
- var addressValueInternal []byte
- switch valueCasted := value.(type) {
- case []byte:
-			if len(valueCasted) != addressByteSize {
-				return nil, fmt.Errorf("address byte slice length not equal to 32 bytes")
-			}
-			addressValueInternal = valueCasted
-		case [addressByteSize]byte:
-			// allocate before copying: copying into the nil slice addressValueInternal
-			// would silently copy zero bytes and corrupt the marshaled address
-			addressValueInternal = make([]byte, addressByteSize)
-			copy(addressValueInternal, valueCasted[:])
- default:
- return nil, fmt.Errorf("cannot infer to byte slice/array for marshal to JSON")
- }
- checksum, err := addressCheckSum(addressValueInternal)
- if err != nil {
- return nil, err
- }
- addressValueInternal = append(addressValueInternal, checksum...)
- return json.Marshal(base32Encoder.EncodeToString(addressValueInternal))
- case ArrayStatic, ArrayDynamic:
- values, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- if t.abiTypeID == ArrayStatic && int(t.staticLength) != len(values) {
- return nil, fmt.Errorf("length of slice %d != type specific length %d", len(values), t.staticLength)
- }
- if t.childTypes[0].abiTypeID == Byte {
- byteArr := make([]byte, len(values))
- for i := 0; i < len(values); i++ {
- tempByte, ok := values[i].(byte)
- if !ok {
- return nil, fmt.Errorf("cannot infer byte element from slice")
- }
- byteArr[i] = tempByte
- }
- return json.Marshal(byteArr)
- }
- rawMsgSlice := make([]json.RawMessage, len(values))
- for i := 0; i < len(values); i++ {
- rawMsgSlice[i], err = t.childTypes[0].MarshalToJSON(values[i])
- if err != nil {
- return nil, err
- }
- }
- return json.Marshal(rawMsgSlice)
- case String:
- stringVal, ok := value.(string)
- if !ok {
- return nil, fmt.Errorf("cannot infer to string for marshal to JSON")
- }
- return json.Marshal(stringVal)
- case Tuple:
- values, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- if len(values) != int(t.staticLength) {
- return nil, fmt.Errorf("tuple element number != value slice length")
- }
- rawMsgSlice := make([]json.RawMessage, len(values))
- for i := 0; i < len(values); i++ {
- rawMsgSlice[i], err = t.childTypes[i].MarshalToJSON(values[i])
- if err != nil {
- return nil, err
- }
- }
- return json.Marshal(rawMsgSlice)
- default:
- return nil, fmt.Errorf("cannot infer ABI type for marshalling value to JSON")
- }
-}
-
-// UnmarshalFromJSON converts JSON-encoded bytes back into a Go value, following the ABI type t and its encoding rules
-func (t Type) UnmarshalFromJSON(jsonEncoded []byte) (interface{}, error) {
- switch t.abiTypeID {
- case Uint:
- num := new(big.Int)
- if err := num.UnmarshalJSON(jsonEncoded); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to uint: %v", string(jsonEncoded), err)
- }
- return castBigIntToNearestPrimitive(num, t.bitSize)
- case Ufixed:
- floatTemp := new(big.Rat)
- if err := floatTemp.UnmarshalText(jsonEncoded); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: %v", string(jsonEncoded), err)
- }
- denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
- denomRat := new(big.Rat).SetInt(denom)
- numeratorRat := new(big.Rat).Mul(denomRat, floatTemp)
- if !numeratorRat.IsInt() {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: precision out of range", string(jsonEncoded))
- }
- return castBigIntToNearestPrimitive(numeratorRat.Num(), t.bitSize)
- case Bool:
- var elem bool
- if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bool: %v", string(jsonEncoded), err)
- }
- return elem, nil
- case Byte:
- var elem byte
- if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded to byte: %v", err)
- }
- return elem, nil
- case Address:
- var addrStr string
- if err := json.Unmarshal(jsonEncoded, &addrStr); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded to address string: %v", err)
- }
- decoded, err := base32Encoder.DecodeString(addrStr)
- if err != nil {
- return nil,
- fmt.Errorf("cannot cast JSON encoded address string (%s) to address: %v", addrStr, err)
- }
- if len(decoded) != addressByteSize+checksumByteSize {
- return nil,
- fmt.Errorf(
- "cannot cast JSON encoded address string (%s) to address: "+
- "decoded byte length should equal to 36 with address and checksum",
-						"decoded byte length should equal 36 (32-byte address + 4-byte checksum)",
- )
- }
- checksum, err := addressCheckSum(decoded[:addressByteSize])
- if err != nil {
- return nil, err
- }
- if !bytes.Equal(checksum, decoded[addressByteSize:]) {
-			return nil, fmt.Errorf("cannot cast JSON encoded address string (%s) to address: decoded checksum mismatch", addrStr)
- }
- return decoded[:addressByteSize], nil
- case ArrayStatic, ArrayDynamic:
- if t.childTypes[0].abiTypeID == Byte && bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
- var byteArr []byte
- err := json.Unmarshal(jsonEncoded, &byteArr)
- if err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bytes: %v", string(jsonEncoded), err)
- }
- if t.abiTypeID == ArrayStatic && len(byteArr) != int(t.staticLength) {
- return nil, fmt.Errorf("length of slice %d != type specific length %d", len(byteArr), t.staticLength)
- }
- outInterface := make([]interface{}, len(byteArr))
- for i := 0; i < len(byteArr); i++ {
- outInterface[i] = byteArr[i]
- }
- return outInterface, nil
- }
- var elems []json.RawMessage
- if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array: %v", string(jsonEncoded), err)
- }
- if t.abiTypeID == ArrayStatic && len(elems) != int(t.staticLength) {
- return nil, fmt.Errorf("JSON array element number != ABI array elem number")
- }
- values := make([]interface{}, len(elems))
- for i := 0; i < len(elems); i++ {
- tempValue, err := t.childTypes[0].UnmarshalFromJSON(elems[i])
- if err != nil {
- return nil, err
- }
- values[i] = tempValue
- }
- return values, nil
- case String:
- stringEncoded := string(jsonEncoded)
- if bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
- var stringVar string
- if err := json.Unmarshal(jsonEncoded, &stringVar); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
- }
- return stringVar, nil
- } else if bytes.HasPrefix(jsonEncoded, []byte{'['}) {
- var elems []byte
- if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
- }
- return string(elems), nil
- } else {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string", stringEncoded)
- }
- case Tuple:
- var elems []json.RawMessage
- if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array for tuple: %v", string(jsonEncoded), err)
- }
- if len(elems) != int(t.staticLength) {
- return nil, fmt.Errorf("JSON array element number != ABI tuple elem number")
- }
- values := make([]interface{}, len(elems))
- for i := 0; i < len(elems); i++ {
- tempValue, err := t.childTypes[i].UnmarshalFromJSON(elems[i])
- if err != nil {
- return nil, err
- }
- values[i] = tempValue
- }
- return values, nil
- default:
-		return nil, fmt.Errorf("cannot unmarshal JSON encoded %s: unsupported ABI type", string(jsonEncoded))
- }
-}
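-
-// As exercised in abi_json_test.go, byte arrays accept two JSON spellings: for
-// TypeOf("byte[3]"), both `"AAEC"` (base64) and `[0, 1, 2]` unmarshal to
-// []interface{}{byte(0), byte(1), byte(2)}.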
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
deleted file mode 100644
index 49083fdea..000000000
--- a/data/abi/abi_json_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "crypto/rand"
- "math/big"
- "testing"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-func TestRandomAddressEquality(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- var addrBasics basics.Address
- var addrABI []byte = make([]byte, addressByteSize)
-
- for testCaseIndex := 0; testCaseIndex < addressTestCaseCount; testCaseIndex++ {
- randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- randomAddrInt.FillBytes(addrBasics[:])
- randomAddrInt.FillBytes(addrABI)
-
- checkSumBasics := addrBasics.GetChecksum()
- checkSumABI, err := addressCheckSum(addrABI)
- require.NoError(t, err, "ABI compute checksum for address slice failed")
-
-		require.Equal(t, checkSumBasics, checkSumABI,
-			"basics.Address computed checksum %v not equal to data.abi computed checksum %v",
-			checkSumBasics, checkSumABI,
-		)
- }
-}
-
-func TestJSONtoInterfaceValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- var testCases = []struct {
- input string
- typeStr string
- expected interface{}
- }{
- {
- input: `[true, [0, 1, 2], 17]`,
- typeStr: `(bool,byte[],uint64)`,
- expected: []interface{}{
- true,
- []interface{}{byte(0), byte(1), byte(2)},
- uint64(17),
- },
- },
- {
- input: `[true, "AAEC", 17]`,
- typeStr: `(bool,byte[],uint64)`,
- expected: []interface{}{
- true,
- []interface{}{byte(0), byte(1), byte(2)},
- uint64(17),
- },
- },
- {
- input: `"AQEEBQEE"`,
- typeStr: `byte[6]`,
- expected: []interface{}{byte(1), byte(1), byte(4), byte(5), byte(1), byte(4)},
- },
- {
- input: `[[0, [true, false], "utf-8"], [18446744073709551615, [false, true], "pistachio"]]`,
- typeStr: `(uint64,bool[2],string)[]`,
- expected: []interface{}{
- []interface{}{uint64(0), []interface{}{true, false}, "utf-8"},
- []interface{}{^uint64(0), []interface{}{false, true}, "pistachio"},
- },
- },
- {
- input: `[]`,
- typeStr: `(uint64,bool[2],string)[]`,
- expected: []interface{}{},
- },
- {
- input: "[]",
- typeStr: "()",
- expected: []interface{}{},
- },
- {
- input: "[65, 66, 67]",
- typeStr: "string",
- expected: "ABC",
- },
- {
- input: "[]",
- typeStr: "string",
- expected: "",
- },
- {
- input: "123.456",
- typeStr: "ufixed64x3",
- expected: uint64(123456),
- },
- {
- input: `"optin"`,
- typeStr: "string",
- expected: "optin",
- },
- {
- input: `"AAEC"`,
- typeStr: "byte[3]",
- expected: []interface{}{byte(0), byte(1), byte(2)},
- },
- {
- input: `["uwu",["AAEC",12.34]]`,
- typeStr: "(string,(byte[3],ufixed64x3))",
- expected: []interface{}{"uwu", []interface{}{[]interface{}{byte(0), byte(1), byte(2)}, uint64(12340)}},
- },
- {
- input: `[399,"should pass",[true,false,false,true]]`,
- typeStr: "(uint64,string,bool[])",
- expected: []interface{}{uint64(399), "should pass", []interface{}{true, false, false, true}},
- },
- }
-
- for _, testCase := range testCases {
- abiT, err := TypeOf(testCase.typeStr)
- require.NoError(t, err, "fail to construct ABI type (%s): %v", testCase.typeStr, err)
- res, err := abiT.UnmarshalFromJSON([]byte(testCase.input))
- require.NoError(t, err, "fail to unmarshal JSON to interface: (%s): %v", testCase.input, err)
- require.Equal(t, testCase.expected, res, "%v not matching with expected value %v", res, testCase.expected)
- resEncoded, err := abiT.Encode(res)
- require.NoError(t, err, "fail to encode %v to ABI bytes: %v", res, err)
- resDecoded, err := abiT.Decode(resEncoded)
- require.NoError(t, err, "fail to decode ABI bytes of %v: %v", res, err)
- require.Equal(t, res, resDecoded, "ABI encode-decode round trip: %v not match with expected %v", resDecoded, res)
- }
-}
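
The test cases above accept byte arrays either as JSON number arrays or as base64 strings ("AAEC" and [0, 1, 2] are the same value). A standard-library check of that equivalence:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	b, err := base64.StdEncoding.DecodeString("AAEC")
	if err != nil {
		panic(err)
	}
	fmt.Println(b) // [0 1 2] — the same bytes as the JSON form [0, 1, 2]
}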
diff --git a/data/abi/abi_type.go b/data/abi/abi_type.go
deleted file mode 100644
index aa4e0b75a..000000000
--- a/data/abi/abi_type.go
+++ /dev/null
@@ -1,498 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "fmt"
- "math"
- "regexp"
- "strconv"
- "strings"
-)
-
-/*
- ABI-Types: uint<N>: An N-bit unsigned integer (8 <= N <= 512 and N % 8 = 0).
- | byte (alias for uint8)
- | ufixed <N> x <M> (8 <= N <= 512, N % 8 = 0, and 0 < M <= 160)
- | bool
- | address (alias for byte[32])
- | <type> [<N>]
- | <type> []
- | string
- | (T1, ..., Tn)
-*/
-
-// BaseType is a type alias for uint32. A BaseType value indicates the type of an ABI value.
-type BaseType uint32
-
-const (
- // Uint is the index (0) for `Uint` type in ABI encoding.
- Uint BaseType = iota
- // Byte is the index (1) for `Byte` type in ABI encoding.
- Byte
- // Ufixed is the index (2) for `UFixed` type in ABI encoding.
- Ufixed
- // Bool is the index (3) for `Bool` type in ABI encoding.
- Bool
- // ArrayStatic is the index (4) for static length array (<type>[length]) type in ABI encoding.
- ArrayStatic
- // Address is the index (5) for `Address` type in ABI encoding (a type alias of byte[32]).
- Address
- // ArrayDynamic is the index (6) for dynamic length array (<type>[]) type in ABI encoding.
- ArrayDynamic
- // String is the index (7) for `String` type in ABI encoding (a type alias of byte[]).
- String
- // Tuple is the index (8) for tuple `(<type 0>, ..., <type k>)` in ABI encoding.
- Tuple
-)
-
-const (
- addressByteSize = 32
- checksumByteSize = 4
- singleByteSize = 1
- singleBoolSize = 1
- lengthEncodeByteSize = 2
- abiEncodingLengthLimit = 1 << 16
-)
-
-// Type is the struct that stores information about an ABI value's type.
-type Type struct {
- abiTypeID BaseType
- childTypes []Type
-
- // applies only to the bitSize <N> of `uint` and `ufixed`
- bitSize uint16
- // applies only to the precision <M> of `ufixed`
- precision uint16
-
- // length for static array / tuple
- /*
- by the ABI spec, the length prefix of an encoded array counts bytes;
- it is a uint16, which only allows lengths in [0, 2^16 - 1],
- so static lengths can only be represented within the uint16 range
- */
- // NOTE may want to change back to uint32/uint64
- staticLength uint16
-}
-
-// String serializes an ABI Type to a string in ABI encoding.
-func (t Type) String() string {
- switch t.abiTypeID {
- case Uint:
- return fmt.Sprintf("uint%d", t.bitSize)
- case Byte:
- return "byte"
- case Ufixed:
- return fmt.Sprintf("ufixed%dx%d", t.bitSize, t.precision)
- case Bool:
- return "bool"
- case ArrayStatic:
- return fmt.Sprintf("%s[%d]", t.childTypes[0].String(), t.staticLength)
- case Address:
- return "address"
- case ArrayDynamic:
- return t.childTypes[0].String() + "[]"
- case String:
- return "string"
- case Tuple:
- typeStrings := make([]string, len(t.childTypes))
- for i := 0; i < len(t.childTypes); i++ {
- typeStrings[i] = t.childTypes[i].String()
- }
- return "(" + strings.Join(typeStrings, ",") + ")"
- default:
- panic("Type Serialization Error, fail to infer from abiTypeID (bruh you shouldn't be here)")
- }
-}
-
-var staticArrayRegexp = regexp.MustCompile(`^([a-z\d\[\](),]+)\[([1-9][\d]*)]$`)
-var ufixedRegexp = regexp.MustCompile(`^ufixed([1-9][\d]*)x([1-9][\d]*)$`)
-
-// TypeOf parses an ABI type string.
-// For example: `TypeOf("(uint64,byte[])")`
-func TypeOf(str string) (Type, error) {
- switch {
- case strings.HasSuffix(str, "[]"):
- arrayArgType, err := TypeOf(str[:len(str)-2])
- if err != nil {
- return Type{}, err
- }
- return makeDynamicArrayType(arrayArgType), nil
- case strings.HasSuffix(str, "]"):
- stringMatches := staticArrayRegexp.FindStringSubmatch(str)
- // match the string itself, array element type, then array length
- if len(stringMatches) != 3 {
- return Type{}, fmt.Errorf(`ill-formed static array: "%s"`, str)
- }
- // the regexp match guarantees that the array length is present
- arrayLengthStr := stringMatches[2]
- // allow only a decimal static array length, limited to 2^16 - 1
- arrayLength, err := strconv.ParseUint(arrayLengthStr, 10, 16)
- if err != nil {
- return Type{}, err
- }
- // parse the array element type
- arrayType, err := TypeOf(stringMatches[1])
- if err != nil {
- return Type{}, err
- }
- return makeStaticArrayType(arrayType, uint16(arrayLength)), nil
- case strings.HasPrefix(str, "uint"):
- typeSize, err := strconv.ParseUint(str[4:], 10, 16)
- if err != nil {
- return Type{}, fmt.Errorf(`ill-formed uint type: "%s"`, str)
- }
- return makeUintType(int(typeSize))
- case str == "byte":
- return byteType, nil
- case strings.HasPrefix(str, "ufixed"):
- stringMatches := ufixedRegexp.FindStringSubmatch(str)
- // match string itself, then type-bitSize, and type-precision
- if len(stringMatches) != 3 {
- return Type{}, fmt.Errorf(`ill-formed ufixed type: "%s"`, str)
- }
- // guaranteed that there are 2 uint strings in ufixed string
- ufixedSize, err := strconv.ParseUint(stringMatches[1], 10, 16)
- if err != nil {
- return Type{}, err
- }
- ufixedPrecision, err := strconv.ParseUint(stringMatches[2], 10, 16)
- if err != nil {
- return Type{}, err
- }
- return makeUfixedType(int(ufixedSize), int(ufixedPrecision))
- case str == "bool":
- return boolType, nil
- case str == "address":
- return addressType, nil
- case str == "string":
- return stringType, nil
- case len(str) >= 2 && str[0] == '(' && str[len(str)-1] == ')':
- tupleContent, err := parseTupleContent(str[1 : len(str)-1])
- if err != nil {
- return Type{}, err
- }
- tupleTypes := make([]Type, len(tupleContent))
- for i := 0; i < len(tupleContent); i++ {
- ti, err := TypeOf(tupleContent[i])
- if err != nil {
- return Type{}, err
- }
- tupleTypes[i] = ti
- }
- return MakeTupleType(tupleTypes)
- default:
- return Type{}, fmt.Errorf(`cannot convert the string "%s" to an ABI type`, str)
- }
-}
-
-// segment keeps track of the start and end of a segment in a string.
-type segment struct{ left, right int }
-
-// parseTupleContent splits an ABI encoded string for tuple type into multiple sub-strings.
-// Each sub-string represents a content type of the tuple type.
-// The argument str is the content between parentheses of tuple, i.e.
-// (...... str ......)
-// ^ ^
-func parseTupleContent(str string) ([]string, error) {
- // if the tuple type content is empty (which is also allowed)
- // just return the empty string list
- if len(str) == 0 {
- return []string{}, nil
- }
-
- // the following 2 checks ensure the input string can be split on commas
- // with form: "...substr_0,...substr_1,...,...substr_k"
-
- // str should not have a leading/trailing comma
- if strings.HasSuffix(str, ",") || strings.HasPrefix(str, ",") {
- return []string{}, fmt.Errorf("parsing error: tuple content should not start or end with a comma")
- }
-
- // str should not have consecutive commas contained
- if strings.Contains(str, ",,") {
- return []string{}, fmt.Errorf("no consecutive commas")
- }
-
- var parenSegmentRecord = make([]segment, 0)
- var stack []int
-
- // find the outermost parentheses segments (not nested inside other parentheses)
- // illustration: "*****,(*****),*****" => ["*****", "(*****)", "*****"]
- // on a left paren '(' push its index onto the stack
- // on a right paren ')' pop one index off the stack
- // if the pop empties the stack, an outermost segment "(******)" has been found
- for index, chr := range str {
- if chr == '(' {
- stack = append(stack, index)
- } else if chr == ')' {
- if len(stack) == 0 {
- return []string{}, fmt.Errorf("unpaired parentheses: %s", str)
- }
- leftParenIndex := stack[len(stack)-1]
- stack = stack[:len(stack)-1]
- if len(stack) == 0 {
- parenSegmentRecord = append(parenSegmentRecord, segment{
- left: leftParenIndex,
- right: index,
- })
- }
- }
- }
- if len(stack) != 0 {
- return []string{}, fmt.Errorf("unpaired parentheses: %s", str)
- }
-
- // cut the parenthesized (tuple-formed) segments out of the string
- strCopied := str
- for i := len(parenSegmentRecord) - 1; i >= 0; i-- {
- parenSeg := parenSegmentRecord[i]
- strCopied = strCopied[:parenSeg.left] + strCopied[parenSeg.right+1:]
- }
-
- // split the string without parenthesis segments
- tupleStrSegs := strings.Split(strCopied, ",")
-
- // the empty strings are placeholders for parenthesis segments
- // put the parenthesis segments back into segment list
- parenSegCount := 0
- for index, segStr := range tupleStrSegs {
- if segStr == "" {
- parenSeg := parenSegmentRecord[parenSegCount]
- tupleStrSegs[index] = str[parenSeg.left : parenSeg.right+1]
- parenSegCount++
- }
- }
-
- return tupleStrSegs, nil
-}
-
-// makeUintType makes `Uint` ABI type by taking a type bitSize argument.
-// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
-func makeUintType(typeSize int) (Type, error) {
- if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
- return Type{}, fmt.Errorf("unsupported uint type bitSize: %d", typeSize)
- }
- return Type{
- abiTypeID: Uint,
- bitSize: uint16(typeSize),
- }, nil
-}
-
-var (
- // byteType is ABI type constant for byte
- byteType = Type{abiTypeID: Byte}
-
- // boolType is ABI type constant for bool
- boolType = Type{abiTypeID: Bool}
-
- // addressType is ABI type constant for address
- addressType = Type{abiTypeID: Address}
-
- // stringType is ABI type constant for string
- stringType = Type{abiTypeID: String}
-)
-
-// makeUfixedType makes `UFixed` ABI type by taking type bitSize and type precision as arguments.
-// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
-// The range of type precision is [1, 160].
-func makeUfixedType(typeSize int, typePrecision int) (Type, error) {
- if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
- return Type{}, fmt.Errorf("unsupported ufixed type bitSize: %d", typeSize)
- }
- if typePrecision > 160 || typePrecision < 1 {
- return Type{}, fmt.Errorf("unsupported ufixed type precision: %d", typePrecision)
- }
- return Type{
- abiTypeID: Ufixed,
- bitSize: uint16(typeSize),
- precision: uint16(typePrecision),
- }, nil
-}
-
-// makeStaticArrayType makes static length array ABI type by taking
-// array element type and array length as arguments.
-func makeStaticArrayType(argumentType Type, arrayLength uint16) Type {
- return Type{
- abiTypeID: ArrayStatic,
- childTypes: []Type{argumentType},
- staticLength: arrayLength,
- }
-}
-
-// makeDynamicArrayType makes dynamic length array by taking array element type as argument.
-func makeDynamicArrayType(argumentType Type) Type {
- return Type{
- abiTypeID: ArrayDynamic,
- childTypes: []Type{argumentType},
- }
-}
-
-// MakeTupleType makes tuple ABI type by taking an array of tuple element types as argument.
-func MakeTupleType(argumentTypes []Type) (Type, error) {
- if len(argumentTypes) >= math.MaxUint16 {
- return Type{}, fmt.Errorf("tuple type child type number larger than maximum uint16 error")
- }
- return Type{
- abiTypeID: Tuple,
- childTypes: argumentTypes,
- staticLength: uint16(len(argumentTypes)),
- }, nil
-}
-
-// Equal method decides the equality of two types: t == t0.
-func (t Type) Equal(t0 Type) bool {
- if t.abiTypeID != t0.abiTypeID {
- return false
- }
- if t.precision != t0.precision || t.bitSize != t0.bitSize {
- return false
- }
- if t.staticLength != t0.staticLength {
- return false
- }
- if len(t.childTypes) != len(t0.childTypes) {
- return false
- }
- for i := 0; i < len(t.childTypes); i++ {
- if !t.childTypes[i].Equal(t0.childTypes[i]) {
- return false
- }
- }
-
- return true
-}
-
-// IsDynamic method decides if an ABI type is dynamic or static.
-func (t Type) IsDynamic() bool {
- switch t.abiTypeID {
- case ArrayDynamic, String:
- return true
- default:
- for _, childT := range t.childTypes {
- if childT.IsDynamic() {
- return true
- }
- }
- return false
- }
-}
-
-// findBoolLR assumes that typeList[index] is an ABI Bool type.
-// It returns the distance from the current index to the furthest consecutive Bool type,
-// searching forward when delta > 0 and backward when delta < 0.
-func findBoolLR(typeList []Type, index int, delta int) int {
- until := 0
- for {
- curr := index + delta*until
- if typeList[curr].abiTypeID == Bool {
- if curr != len(typeList)-1 && delta > 0 {
- until++
- } else if curr > 0 && delta < 0 {
- until++
- } else {
- break
- }
- } else {
- until--
- break
- }
- }
- return until
-}
-
-// ByteLen method calculates the byte length of a static ABI type.
-func (t Type) ByteLen() (int, error) {
- switch t.abiTypeID {
- case Address:
- return addressByteSize, nil
- case Byte:
- return singleByteSize, nil
- case Uint, Ufixed:
- return int(t.bitSize / 8), nil
- case Bool:
- return singleBoolSize, nil
- case ArrayStatic:
- if t.childTypes[0].abiTypeID == Bool {
- byteLen := int(t.staticLength+7) / 8
- return byteLen, nil
- }
- elemByteLen, err := t.childTypes[0].ByteLen()
- if err != nil {
- return -1, err
- }
- return int(t.staticLength) * elemByteLen, nil
- case Tuple:
- size := 0
- for i := 0; i < len(t.childTypes); i++ {
- if t.childTypes[i].abiTypeID == Bool {
- // search after bool
- after := findBoolLR(t.childTypes, i, 1)
- // shift the index
- i += after
- // get number of bool
- boolNum := after + 1
- size += (boolNum + 7) / 8
- } else {
- childByteSize, err := t.childTypes[i].ByteLen()
- if err != nil {
- return -1, err
- }
- size += childByteSize
- }
- }
- return size, nil
- default:
- return -1, fmt.Errorf("%s is a dynamic type", t.String())
- }
-}
-
-// AnyTransactionType is the ABI argument type string for a nonspecific transaction argument
-const AnyTransactionType = "txn"
-
-// IsTransactionType checks if a type string represents a transaction type
-// argument, such as "txn", "pay", "keyreg", etc.
-func IsTransactionType(s string) bool {
- switch s {
- case AnyTransactionType, "pay", "keyreg", "acfg", "axfer", "afrz", "appl":
- return true
- default:
- return false
- }
-}
-
-// AccountReferenceType is the ABI argument type string for account references
-const AccountReferenceType = "account"
-
-// AssetReferenceType is the ABI argument type string for asset references
-const AssetReferenceType = "asset"
-
-// ApplicationReferenceType is the ABI argument type string for application references
-const ApplicationReferenceType = "application"
-
-// IsReferenceType checks if a type string represents a reference type argument,
-// such as "account", "asset", or "application".
-func IsReferenceType(s string) bool {
- switch s {
- case AccountReferenceType, AssetReferenceType, ApplicationReferenceType:
- return true
- default:
- return false
- }
-}
-
-// VoidReturnType is the ABI return type string for a method that does not return any value
-const VoidReturnType = "void"
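
ByteLen above packs each run of consecutive Bool elements eight to a byte via (boolNum + 7) / 8. Just that rule, in isolation:

package main

import "fmt"

// packedBoolBytes mirrors the (boolNum + 7) / 8 arithmetic in ByteLen:
// n consecutive packed bools occupy ceil(n / 8) bytes.
func packedBoolBytes(n int) int {
	return (n + 7) / 8
}

func main() {
	for _, n := range []int{1, 8, 9, 128} {
		fmt.Printf("bool[%d] occupies %d byte(s)\n", n, packedBoolBytes(n))
	}
	// prints 1, 1, 2, and 16 bytes respectively
}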
diff --git a/data/abi/abi_type_test.go b/data/abi/abi_type_test.go
deleted file mode 100644
index fb7c7e902..000000000
--- a/data/abi/abi_type_test.go
+++ /dev/null
@@ -1,613 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "fmt"
- "math/rand"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-func TestMakeTypeValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // uint
- for i := 8; i <= 512; i += 8 {
- uintType, err := makeUintType(i)
- require.NoError(t, err, "make uint type in valid space should not return error")
- expected := "uint" + strconv.Itoa(i)
- actual := uintType.String()
- require.Equal(t, expected, actual, "makeUintType: expected %s, actual %s", expected, actual)
- }
- // ufixed
- for i := 8; i <= 512; i += 8 {
- for j := 1; j <= 160; j++ {
- ufixedType, err := makeUfixedType(i, j)
- require.NoError(t, err, "make ufixed type in valid space should not return error")
- expected := "ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j)
- actual := ufixedType.String()
- require.Equal(t, expected, actual,
- "TypeOf ufixed error: expected %s, actual %s", expected, actual)
- }
- }
- // bool/strings/address/byte + dynamic/static array + tuple
- var testcases = []struct {
- input Type
- testType string
- expected string
- }{
- {input: boolType, testType: "bool", expected: "bool"},
- {input: stringType, testType: "string", expected: "string"},
- {input: addressType, testType: "address", expected: "address"},
- {input: byteType, testType: "byte", expected: "byte"},
- // dynamic array
- {
- input: makeDynamicArrayType(
- Type{
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- ),
- testType: "dynamic array",
- expected: "uint32[]",
- },
- {
- input: makeDynamicArrayType(
- makeDynamicArrayType(
- byteType,
- ),
- ),
- testType: "dynamic array",
- expected: "byte[][]",
- },
- {
- input: makeStaticArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(128),
- precision: uint16(10),
- },
- uint16(100),
- ),
- testType: "static array",
- expected: "ufixed128x10[100]",
- },
- {
- input: makeStaticArrayType(
- makeStaticArrayType(
- boolType,
- uint16(128),
- ),
- uint16(256),
- ),
- testType: "static array",
- expected: "bool[128][256]",
- },
- // tuple type
- {
- input: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 4,
- },
- makeDynamicArrayType(byteType),
- },
- staticLength: 3,
- },
- testType: "tuple type",
- expected: "(uint32,(address,byte,bool[10],ufixed256x10[]),byte[])",
- },
- }
- for _, testcase := range testcases {
- t.Run(fmt.Sprintf("MakeType test %s", testcase.testType), func(t *testing.T) {
- actual := testcase.input.String()
- require.Equal(t, testcase.expected, actual,
- "MakeType: expected %s, actual %s", testcase.expected, actual)
- })
- }
-}
-
-func TestMakeTypeInvalid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // uint
- for i := 0; i <= 1000; i++ {
- randInput := rand.Uint32() % (1 << 16)
- for randInput%8 == 0 && randInput <= 512 && randInput >= 8 {
- randInput = rand.Uint32() % (1 << 16)
- }
- // note: divisibility by 8 depends only on the low 3 bits, so it is preserved when truncating uint32 to uint16.
- _, err := makeUintType(int(randInput))
- require.Error(t, err, "makeUintType: should throw error on bitSize input %d", uint16(randInput))
- }
- // ufixed
- for i := 0; i <= 10000; i++ {
- randSize := rand.Uint64() % (1 << 16)
- for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
- randSize = rand.Uint64() % (1 << 16)
- }
- randPrecision := rand.Uint32()
- for randPrecision >= 1 && randPrecision <= 160 {
- randPrecision = rand.Uint32()
- }
- _, err := makeUfixedType(int(randSize), int(randPrecision))
- require.Error(t, err, "makeUfixedType: should throw error on bitSize %d, precision %d", randSize, randPrecision)
- }
-}
-
-func TestTypeFromStringValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // uint
- for i := 8; i <= 512; i += 8 {
- expected, err := makeUintType(i)
- require.NoError(t, err, "make uint type in valid space should not return error")
- actual, err := TypeOf(expected.String())
- require.NoError(t, err, "TypeOf: uint parsing error: %s", expected.String())
- require.Equal(t, expected, actual,
- "TypeOf: expected %s, actual %s", expected.String(), actual.String())
- }
- // ufixed
- for i := 8; i <= 512; i += 8 {
- for j := 1; j <= 160; j++ {
- expected, err := makeUfixedType(i, j)
- require.NoError(t, err, "make ufixed type in valid space should not return error")
- actual, err := TypeOf("ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j))
- require.NoError(t, err, "TypeOf ufixed parsing error: %s", expected.String())
- require.Equal(t, expected, actual,
- "TypeOf ufixed: expected %s, actual %s", expected.String(), actual.String())
- }
- }
- var testcases = []struct {
- input string
- testType string
- expected Type
- }{
- {input: boolType.String(), testType: "bool", expected: boolType},
- {input: stringType.String(), testType: "string", expected: stringType},
- {input: addressType.String(), testType: "address", expected: addressType},
- {input: byteType.String(), testType: "byte", expected: byteType},
- {
- input: "uint256[]",
- testType: "dynamic array",
- expected: makeDynamicArrayType(Type{abiTypeID: Uint, bitSize: 256}),
- },
- {
- input: "ufixed256x64[]",
- testType: "dynamic array",
- expected: makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: 256,
- precision: 64,
- },
- ),
- },
- {
- input: "byte[][][][]",
- testType: "dynamic array",
- expected: makeDynamicArrayType(
- makeDynamicArrayType(
- makeDynamicArrayType(
- makeDynamicArrayType(
- byteType,
- ),
- ),
- ),
- ),
- },
- // static array
- {
- input: "address[100]",
- testType: "static array",
- expected: makeStaticArrayType(
- addressType,
- uint16(100),
- ),
- },
- {
- input: "uint64[][200]",
- testType: "static array",
- expected: makeStaticArrayType(
- makeDynamicArrayType(
- Type{abiTypeID: Uint, bitSize: uint16(64)},
- ),
- uint16(200),
- ),
- },
- // tuple type
- {
- input: "()",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{},
- staticLength: 0,
- },
- },
- {
- input: "(uint32,(address,byte,bool[10],ufixed256x10[]),byte[])",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 4,
- },
- makeDynamicArrayType(byteType),
- },
- staticLength: 3,
- },
- },
- {
- input: "(uint32,(address,byte,bool[10],(ufixed256x10[])))",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 1,
- },
- },
- staticLength: 4,
- },
- },
- staticLength: 2,
- },
- },
- {
- input: "((uint32),(address,(byte,bool[10],ufixed256x10[])))",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- },
- staticLength: 1,
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 3,
- },
- },
- staticLength: 2,
- },
- },
- staticLength: 2,
- },
- },
- }
- for _, testcase := range testcases {
- t.Run(fmt.Sprintf("TypeOf test %s", testcase.testType), func(t *testing.T) {
- actual, err := TypeOf(testcase.input)
- require.NoError(t, err, "TypeOf %s parsing error", testcase.testType)
- require.Equal(t, testcase.expected, actual, "TestFromString %s: expected %s, actual %s",
- testcase.testType, testcase.expected.String(), actual.String())
- })
- }
-}
-
-func TestTypeFromStringInvalid(t *testing.T) {
- partitiontest.PartitionTest(t)
- for i := 0; i <= 1000; i++ {
- randSize := rand.Uint64()
- for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
- randSize = rand.Uint64()
- }
- errorInput := "uint" + strconv.FormatUint(randSize, 10)
- _, err := TypeOf(errorInput)
- require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
- }
- for i := 0; i <= 10000; i++ {
- randSize := rand.Uint64()
- for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
- randSize = rand.Uint64()
- }
- randPrecision := rand.Uint64()
- for randPrecision >= 1 && randPrecision <= 160 {
- randPrecision = rand.Uint64()
- }
- errorInput := "ufixed" + strconv.FormatUint(randSize, 10) + "x" + strconv.FormatUint(randPrecision, 10)
- _, err := TypeOf(errorInput)
- require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
- }
- var testcases = []string{
- // uint
- "uint123x345",
- "uint 128",
- "uint8 ",
- "uint!8",
- "uint[32]",
- "uint-893",
- "uint#120\\",
- // ufixed
- "ufixed000000000016x0000010",
- "ufixed123x345",
- "ufixed 128 x 100",
- "ufixed64x10 ",
- "ufixed!8x2 ",
- "ufixed[32]x16",
- "ufixed-64x+100",
- "ufixed16x+12",
- // dynamic array
- "uint256 []",
- "byte[] ",
- "[][][]",
- "stuff[]",
- // static array
- "ufixed32x10[0]",
- "byte[10 ]",
- "uint64[0x21]",
- // tuple
- "(ufixed128x10))",
- "(,uint128,byte[])",
- "(address,ufixed64x5,)",
- "(byte[16],somethingwrong)",
- "( )",
- "((uint32)",
- "(byte,,byte)",
- "((byte),,(byte))",
- }
- for _, testcase := range testcases {
- t.Run(fmt.Sprintf("TypeOf dynamic array test %s", testcase), func(t *testing.T) {
- _, err := TypeOf(testcase)
- require.Error(t, err, "%s should throw error", testcase)
- })
- }
-}
-
-func generateTupleType(baseTypes []Type, tupleTypes []Type) Type {
- if len(baseTypes) == 0 && len(tupleTypes) == 0 {
- panic("should not pass all nil arrays into generateTupleType")
- }
- tupleLen := 0
- for tupleLen == 0 {
- tupleLen = rand.Intn(20)
- }
- resultTypes := make([]Type, tupleLen)
- for i := 0; i < tupleLen; i++ {
- baseOrTuple := rand.Intn(5)
- if baseOrTuple == 1 && len(tupleTypes) > 0 {
- resultTypes[i] = tupleTypes[rand.Intn(len(tupleTypes))]
- } else {
- resultTypes[i] = baseTypes[rand.Intn(len(baseTypes))]
- }
- }
- return Type{abiTypeID: Tuple, childTypes: resultTypes, staticLength: uint16(tupleLen)}
-}
-
-func TestTypeMISC(t *testing.T) {
- partitiontest.PartitionTest(t)
- rand.Seed(time.Now().Unix())
-
- var testpool = []Type{
- boolType,
- addressType,
- stringType,
- byteType,
- }
- for i := 8; i <= 512; i += 8 {
- uintT, err := makeUintType(i)
- require.NoError(t, err, "make uint type error")
- testpool = append(testpool, uintT)
- }
- for i := 8; i <= 512; i += 8 {
- for j := 1; j <= 160; j++ {
- ufixedT, err := makeUfixedType(i, j)
- require.NoError(t, err, "make ufixed type error: bitSize %d, precision %d", i, j)
- testpool = append(testpool, ufixedT)
- }
- }
- for _, testcase := range testpool {
- testpool = append(testpool, makeDynamicArrayType(testcase))
- testpool = append(testpool, makeStaticArrayType(testcase, 10))
- testpool = append(testpool, makeStaticArrayType(testcase, 20))
- }
-
- for _, testcase := range testpool {
- require.True(t, testcase.Equal(testcase), "test type self equal error")
- }
- baseTestCount := 0
- for baseTestCount < 1000 {
- index0 := rand.Intn(len(testpool))
- index1 := rand.Intn(len(testpool))
- if index0 == index1 {
- continue
- }
- require.False(t, testpool[index0].Equal(testpool[index1]),
- "test type not equal error\n%s\n%s",
- testpool[index0].String(), testpool[index1].String())
- baseTestCount++
- }
-
- testpoolTuple := make([]Type, 0)
- for i := 0; i < 100; i++ {
- testpoolTuple = append(testpoolTuple, generateTupleType(testpool, testpoolTuple))
- }
- for _, testcaseTuple := range testpoolTuple {
- require.True(t, testcaseTuple.Equal(testcaseTuple), "test type tuple equal error")
- }
-
- tupleTestCount := 0
- for tupleTestCount < 100 {
- index0 := rand.Intn(len(testpoolTuple))
- index1 := rand.Intn(len(testpoolTuple))
- if testpoolTuple[index0].String() == testpoolTuple[index1].String() {
- continue
- }
- require.False(t, testpoolTuple[index0].Equal(testpoolTuple[index1]),
- "test type tuple not equal error\n%s\n%s",
- testpoolTuple[index0].String(), testpoolTuple[index1].String())
- tupleTestCount++
- }
-
- testpool = append(testpool, testpoolTuple...)
- isDynamicCount := 0
- for isDynamicCount < 100 {
- index := rand.Intn(len(testpool))
- isDynamicArr := strings.Contains(testpool[index].String(), "[]")
- isDynamicStr := strings.Contains(testpool[index].String(), "string")
- require.Equal(t, isDynamicArr || isDynamicStr, testpool[index].IsDynamic(),
- "test type isDynamic error\n%s", testpool[index].String())
- isDynamicCount++
- }
-
- addressByteLen, err := addressType.ByteLen()
- require.NoError(t, err, "address type bytelen should not return error")
- require.Equal(t, 32, addressByteLen, "address type bytelen should be 32")
- byteByteLen, err := byteType.ByteLen()
- require.NoError(t, err, "byte type bytelen should not return error")
- require.Equal(t, 1, byteByteLen, "byte type bytelen should be 1")
- boolByteLen, err := boolType.ByteLen()
- require.NoError(t, err, "bool type bytelen should be 1")
- require.Equal(t, 1, boolByteLen, "bool type bytelen should be 1")
-
- byteLenTestCount := 0
- for byteLenTestCount < 100 {
- index := rand.Intn(len(testpool))
- testType := testpool[index]
- byteLen, err := testType.ByteLen()
- if testType.IsDynamic() {
- require.Error(t, err, "byteLen test error on %s dynamic type, should have error",
- testType.String())
- } else {
- require.NoError(t, err, "byteLen test error on %s dynamic type, should not have error")
- if testType.abiTypeID == Tuple {
- sizeSum := 0
- for i := 0; i < len(testType.childTypes); i++ {
- if testType.childTypes[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(testType.childTypes, i, -1)
- // search after bool
- after := findBoolLR(testType.childTypes, i, 1)
- // append to heads and tails
- require.True(t, before%8 == 0, "expected tuple bool compact by 8")
- if after > 7 {
- after = 7
- }
- i += after
- sizeSum++
- } else {
- childByteSize, err := testType.childTypes[i].ByteLen()
- require.NoError(t, err, "byteLen not expected to fail on tuple child type")
- sizeSum += childByteSize
- }
- }
-
- require.Equal(t, sizeSum, byteLen,
- "%s do not match calculated byte length %d", testType.String(), sizeSum)
- } else if testType.abiTypeID == ArrayStatic {
- if testType.childTypes[0].abiTypeID == Bool {
- expected := testType.staticLength / 8
- if testType.staticLength%8 != 0 {
- expected++
- }
- actual, err := testType.ByteLen()
- require.NoError(t, err, "%s should not return error on byteLen test")
- require.Equal(t, int(expected), actual, "%s do not match calculated byte length %d",
- testType.String(), expected)
- } else {
- childSize, err := testType.childTypes[0].ByteLen()
- require.NoError(t, err, "%s should not return error on byteLen test", testType.childTypes[0].String())
- expected := childSize * int(testType.staticLength)
- require.Equal(t, expected, byteLen,
- "%s do not match calculated byte length %d", testType.String(), expected)
- }
- }
- }
- byteLenTestCount++
- }
-}
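
One subtlety these tests exercise: staticArrayRegexp backtracks so that the element type of "uint64[][200]" parses as the dynamic array "uint64[]", not "uint64". A standalone sketch using the same pattern as the deleted abi_type.go:

package main

import (
	"fmt"
	"regexp"
)

var staticArrayRegexp = regexp.MustCompile(`^([a-z\d\[\](),]+)\[([1-9][\d]*)]$`)

func main() {
	m := staticArrayRegexp.FindStringSubmatch("uint64[][200]")
	fmt.Printf("element type %q, length %s\n", m[1], m[2]) // element type "uint64[]", length 200
}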
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index e36f22451..e1c82a892 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -220,6 +220,9 @@ var ErrNoKeyForID = errors.New("no valid key found for the participationID")
// ErrSecretNotFound is used when attempting to lookup secrets for a particular round.
var ErrSecretNotFound = errors.New("the participation ID did not have secrets for the requested round")
+// ErrStateProofVerifierNotFound states that no state proof field was found.
+var ErrStateProofVerifierNotFound = errors.New("record contains no StateProofVerifier")
+
// ParticipationRegistry contain all functions for interacting with the Participation Registry.
type ParticipationRegistry interface {
// Insert adds a record to storage and computes the ParticipationID
@@ -767,6 +770,10 @@ func (db *participationDB) GetStateProofSecretsForRound(id ParticipationID, roun
if err != nil {
return StateProofSecretsForRound{}, err
}
+ if partRecord.StateProof == nil {
+ return StateProofSecretsForRound{},
+ fmt.Errorf("%w: for participation ID %v", ErrStateProofVerifierNotFound, id)
+ }
var result StateProofSecretsForRound
result.ParticipationRecord = partRecord.ParticipationRecord
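
Because the new error is built with %w, callers can match the sentinel with errors.Is no matter what participation-ID context was appended. A minimal illustration (the ID string is made up):

package main

import (
	"errors"
	"fmt"
)

var ErrStateProofVerifierNotFound = errors.New("record contains no StateProofVerifier")

func main() {
	err := fmt.Errorf("%w: for participation ID %v", ErrStateProofVerifierNotFound, "some-id")
	fmt.Println(errors.Is(err, ErrStateProofVerifierNotFound)) // true
}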
diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go
index 8e02ff4e4..286edf117 100644
--- a/data/account/participationRegistry_test.go
+++ b/data/account/participationRegistry_test.go
@@ -27,10 +27,9 @@ import (
"os"
"path/filepath"
"strconv"
- "sync/atomic"
-
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -958,6 +957,36 @@ func TestAddStateProofKeys(t *testing.T) {
}
}
+func TestGetRoundSecretsWithNilStateProofVerifier(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry, dbfile := getRegistry(t)
+ defer registryCloseTest(t, registry, dbfile)
+
+ access, err := db.MakeAccessor("stateprooftest", false, true)
+ if err != nil {
+ panic(err)
+ }
+ root, err := GenerateRoot(access)
+ a.NoError(err)
+ p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(stateProofIntervalForTests*2), 3)
+ access.Close()
+ a.NoError(err)
+
+ // Install a key for testing
+ id, err := registry.Insert(p.Participation)
+ a.NoError(err)
+
+ // ensure that GetStateProofSecretsForRound receives a participationRecord without a StateProof field from the cache.
+ prt := registry.cache[id]
+ prt.StateProof = nil
+ registry.cache[id] = prt
+
+ a.NoError(registry.Flush(defaultTimeout))
+
+ _, err = registry.GetStateProofSecretsForRound(id, basics.Round(stateProofIntervalForTests)-1)
+ a.ErrorIs(err, ErrStateProofVerifierNotFound)
+}
+
func TestSecretNotFound(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
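
The cache manipulation in the new test copies the record out, clears the field, and stores the copy back, because Go map values of struct type are not addressable. The same move in isolation (record is a stand-in for the registry's cached type):

package main

import "fmt"

type record struct{ StateProof *int }

func main() {
	cache := map[string]record{"id": {StateProof: new(int)}}
	// cache["id"].StateProof = nil would not compile; mutate a copy instead.
	r := cache["id"]
	r.StateProof = nil
	cache["id"] = r
	fmt.Println(cache["id"].StateProof == nil) // true
}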
diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go
index 6fa69bb15..282008eb7 100644
--- a/data/account/registeryDbOps.go
+++ b/data/account/registeryDbOps.go
@@ -21,9 +21,10 @@ import (
"database/sql"
"errors"
"fmt"
+ "strings"
+
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
- "strings"
)
type dbOp interface {
@@ -168,11 +169,7 @@ func (i *insertOp) apply(db *participationDB) (err error) {
// Create Rolling entry
result, err = tx.Exec(insertRollingQuery, pk, rawVoting)
- if err = verifyExecWithOneRowEffected(err, result, "insert rolling"); err != nil {
- return err
- }
-
- return nil
+ return verifyExecWithOneRowEffected(err, result, "insert rolling")
})
return err
}
diff --git a/data/accountManager.go b/data/accountManager.go
index d44091f80..aa5064e09 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -79,10 +79,10 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
// StateProofKeys returns a list of Participation accounts, and their stateproof secrets
func (manager *AccountManager) StateProofKeys(rnd basics.Round) (out []account.StateProofSecretsForRound) {
for _, part := range manager.registry.GetAll() {
- if part.OverlapsInterval(rnd, rnd) {
+ if part.StateProof != nil && part.OverlapsInterval(rnd, rnd) {
partRndSecrets, err := manager.registry.GetStateProofSecretsForRound(part.ParticipationID, rnd)
if err != nil {
- manager.log.Errorf("error while loading round secrets from participation registry: %w", err)
+ manager.log.Errorf("error while loading round secrets from participation registry: %v", err)
continue
}
out = append(out, partRndSecrets)
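
The %w → %v switch matters because %w is meaningful only to fmt.Errorf, which uses it to build a wrapped error; a plain formatter (as logging backends generally apply under an Errorf method) renders it as a bad verb:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")
	fmt.Println(fmt.Sprintf("failed: %v", err)) // failed: boom
	fmt.Println(fmt.Sprintf("failed: %w", err)) // failed: %!w(*errors.errorString=&{boom})
}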
diff --git a/data/accountManager_test.go b/data/accountManager_test.go
index 9d464cba1..1fcfe56bf 100644
--- a/data/accountManager_test.go
+++ b/data/accountManager_test.go
@@ -17,6 +17,7 @@
package data
import (
+ "bytes"
"fmt"
"os"
"path/filepath"
@@ -248,3 +249,46 @@ func TestAccountManagerOverlappingStateProofKeys(t *testing.T) {
res = acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault * 3))
a.Equal(1, len(res))
}
+
+func TestGetStateProofKeysDontLogErrorOnNilStateProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+
+ registry, dbName := getRegistryImpl(t, false, true)
+ defer registryCloseTest(t, registry, dbName)
+
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Error)
+ logbuffer := bytes.NewBuffer(nil)
+ log.SetOutput(logbuffer)
+
+ acctManager := MakeAccountManager(log, registry)
+ databaseFiles := make([]string, 0)
+ defer func() {
+ for _, fileName := range databaseFiles {
+ os.Remove(fileName)
+ os.Remove(fileName + "-shm")
+ os.Remove(fileName + "-wal")
+ os.Remove(fileName + "-journal")
+ }
+ }()
+
+ // Generate 2 participations under the same account
+ store, err := db.MakeAccessor("stateprooftest", false, true)
+ a.NoError(err)
+ root, err := account.GenerateRoot(store)
+ a.NoError(err)
+ part1, err := account.FillDBWithParticipationKeys(store, root.Address(), 0, basics.Round(merklesignature.KeyLifetimeDefault*2), 3)
+ a.NoError(err)
+ store.Close()
+
+ part1.StateProofSecrets = nil
+ _, err = registry.Insert(part1.Participation)
+ a.NoError(err)
+
+ logbuffer.Reset()
+ acctManager.StateProofKeys(1)
+ lg := logbuffer.String()
+ a.False(strings.Contains(lg, account.ErrStateProofVerifierNotFound.Error()))
+ a.False(strings.Contains(lg, "level=error"), "expected no error in log:", lg)
+}
diff --git a/data/basics/address.go b/data/basics/address.go
index 5eed1c512..412b7bf75 100644
--- a/data/basics/address.go
+++ b/data/basics/address.go
@@ -24,23 +24,6 @@ import (
"github.com/algorand/go-algorand/crypto"
)
-// NOTE: Another (partial) implementation of `basics.Address` is in `data/abi`.
-// The reason for not using this `Address` in `data/abi` is that:
-// - `data/basics` has C dependencies (`go-algorand/crypto`)
-// - `go-algorand-sdk` has a dependency on `go-algorand` for `ABI`
-// - if `go-algorand`'s ABI used `basics.Address`, it would be
-// impossible to bump the version of `go-algorand` in `go-algorand-sdk`
-
-// This is discussed in:
-// - ISSUE https://github.com/algorand/go-algorand/issues/3355
-// - PR https://github.com/algorand/go-algorand/pull/3375
-
-// There are two solutions:
-// - One is to refactor `crypto.Digest`, `crypto.Hash` and `basics.Address`
-// into packages that do not need the `libsodium` crypto dependency
-// - The other is to wrap `libsodium` in a driver interface to make the crypto
-// package importable (even if `libsodium` does not exist)
-
type (
// Address is a unique identifier corresponding to ownership of money
Address crypto.Digest
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index 114bb37f8..7f01519e8 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -18,7 +18,7 @@ package bookkeeping
import (
"fmt"
- "io/ioutil"
+ "os"
"time"
"github.com/algorand/go-algorand/config"
@@ -86,7 +86,7 @@ type Genesis struct {
// LoadGenesisFromFile attempts to load a Genesis structure from a (presumably) genesis.json file.
func LoadGenesisFromFile(genesisFile string) (genesis Genesis, err error) {
// Load genesis.json
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
return
}
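
io/ioutil has been deprecated since Go 1.16, and os.ReadFile is its drop-in replacement with an identical signature — which is all this hunk changes. The call in isolation (the path here is illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("genesis.json") // was: ioutil.ReadFile
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}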
diff --git a/data/bookkeeping/txn_merkle_test.go b/data/bookkeeping/txn_merkle_test.go
index 30a34ab7f..4ead543da 100644
--- a/data/bookkeeping/txn_merkle_test.go
+++ b/data/bookkeeping/txn_merkle_test.go
@@ -162,6 +162,7 @@ func BenchmarkTxnRoots(b *testing.B) {
crypto.RandBytes(txn.PaymentTxnFields.Receiver[:])
sigtxn := transactions.SignedTxn{Txn: txn}
+ crypto.RandBytes(sigtxn.Sig[:])
ad := transactions.ApplyData{}
stib, err := blk.BlockHeader.EncodeSignedTxn(sigtxn, ad)
@@ -173,7 +174,7 @@ func BenchmarkTxnRoots(b *testing.B) {
break
}
}
-
+ b.Logf("Made block with %d transactions and %d txn bytes", len(blk.Payset), len(protocol.Encode(blk.Payset)))
var r crypto.Digest
b.Run("FlatCommit", func(b *testing.B) {
@@ -192,6 +193,14 @@ func BenchmarkTxnRoots(b *testing.B) {
}
})
+ b.Run("SHA256MerkleCommit", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var err error
+ r, err = blk.paysetCommitSHA256()
+ require.NoError(b, err)
+ }
+ })
+
_ = r
}
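
The added SHA256MerkleCommit case follows the file's existing pattern: one shared block setup, then a b.Run sub-benchmark per commitment scheme so each gets its own timing bucket. A skeletal sketch of that structure (only FlatCommit and SHA256MerkleCommit appear in this hunk; the package name is illustrative):

package bookkeeping_test

import "testing"

func BenchmarkPaysetCommitSchemes(b *testing.B) {
	// ... shared block setup would go here ...
	for _, scheme := range []string{"FlatCommit", "SHA256MerkleCommit"} {
		b.Run(scheme, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				// compute the payset commitment for this scheme
			}
		})
	}
}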
diff --git a/data/ledger.go b/data/ledger.go
index 8fc03cb6e..101da721a 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -184,7 +184,7 @@ func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
}
}
- totals, err := l.OnlineTotals(r) //nolint:typecheck
+ totals, err := l.OnlineTotals(r)
if err != nil {
return basics.MicroAlgos{}, err
}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index d5efc6c89..d5df86811 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -859,15 +859,13 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
}
}
stats.TotalLength += uint64(encodedLen)
- stats.StateProofNextRound = uint64(assembled.Block().StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
if txib.Txn.Type == protocol.StateProofTx {
stats.StateProofStats = pool.getStateProofStats(&txib, encodedLen)
}
}
-
stats.AverageFee = totalFees / uint64(stats.IncludedCount)
}
-
+ stats.StateProofNextRound = uint64(assembled.Block().StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
var details struct {
Round uint64
}
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 7dcc4c6ba..acbc5a9bc 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -21,6 +21,9 @@ import (
"bytes"
"fmt"
"math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
"strings"
"testing"
"time"
@@ -1099,6 +1102,110 @@ func BenchmarkTransactionPoolPending(b *testing.B) {
}
}
+// BenchmarkTransactionPoolRecompute attempts to build a transaction pool of 3x block size
+// and then calls recomputeBlockEvaluator to update the pool given the just-committed txns.
+// It repeats this process b.N times.
+func BenchmarkTransactionPoolRecompute(b *testing.B) {
+ b.Log("Running with b.N", b.N)
+ poolSize := 100000
+ numOfAccounts := 100
+ numTransactions := 75000
+ blockTxnCount := 25000
+
+ myVersion := protocol.ConsensusVersion("test-large-blocks")
+ myProto := config.Consensus[protocol.ConsensusCurrentVersion]
+ if myProto.MaxTxnBytesPerBlock != 5*1024*1024 {
+ b.FailNow() // this benchmark is intended to be used with 5MB blocks
+ }
+ config.Consensus[myVersion] = myProto
+
+ // Generate accounts
+ secrets := make([]*crypto.SignatureSecrets, numOfAccounts)
+ addresses := make([]basics.Address, numOfAccounts)
+
+ for i := 0; i < numOfAccounts; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ }
+
+ l := mockLedger(b, initAccFixed(addresses, 1<<50), myVersion)
+ cfg := config.GetDefaultLocal()
+ cfg.TxPoolSize = poolSize
+ cfg.EnableProcessBlockStats = false
+
+ setupPool := func() (*TransactionPool, map[transactions.Txid]ledgercore.IncludedTransactions, uint) {
+ transactionPool := MakeTransactionPool(l, cfg, logging.Base())
+
+ // make some transactions
+ var signedTransactions []transactions.SignedTxn
+ for i := 0; i < numTransactions; i++ {
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addresses[i%numOfAccounts],
+ Fee: basics.MicroAlgos{Raw: 20000 + proto.MinTxnFee},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ GenesisHash: l.GenesisHash(),
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addresses[rand.Intn(numOfAccounts)],
+ Amount: basics.MicroAlgos{Raw: proto.MinBalance + uint64(rand.Intn(1<<32))},
+ },
+ }
+
+ signedTx := tx.Sign(secrets[i%numOfAccounts])
+ signedTransactions = append(signedTransactions, signedTx)
+ require.NoError(b, transactionPool.RememberOne(signedTx))
+ }
+
+ // make args for recomputeBlockEvaluator() like OnNewBlock() would
+ var knownCommitted uint
+ committedTxIds := make(map[transactions.Txid]ledgercore.IncludedTransactions)
+ for i := 0; i < blockTxnCount; i++ {
+ knownCommitted++
+ // OK to use empty IncludedTransactions: recomputeBlockEvaluator is only checking map membership
+ committedTxIds[signedTransactions[i].ID()] = ledgercore.IncludedTransactions{}
+ }
+ b.Logf("Made transactionPool with %d signedTransactions, %d committedTxIds, %d knownCommitted",
+ len(signedTransactions), len(committedTxIds), knownCommitted)
+ b.Logf("transactionPool pendingTxGroups %d rememberedTxGroups %d",
+ len(transactionPool.pendingTxGroups), len(transactionPool.rememberedTxGroups))
+ return transactionPool, committedTxIds, knownCommitted
+ }
+
+ transactionPool := make([]*TransactionPool, b.N)
+ committedTxIds := make([]map[transactions.Txid]ledgercore.IncludedTransactions, b.N)
+ knownCommitted := make([]uint, b.N)
+ for i := 0; i < b.N; i++ {
+ transactionPool[i], committedTxIds[i], knownCommitted[i] = setupPool()
+ }
+ time.Sleep(time.Second)
+ runtime.GC()
+ // CPU profiler if CPUPROFILE set
+ var profF *os.File
+ if os.Getenv("CPUPROFILE") != "" {
+ var err error
+ profF, err = os.Create(fmt.Sprintf("recomputePool-%d-%d.prof", b.N, crypto.RandUint64()))
+ require.NoError(b, err)
+ }
+
+ // call recomputeBlockEvaluator
+ if profF != nil {
+ pprof.StartCPUProfile(profF)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ transactionPool[i].recomputeBlockEvaluator(committedTxIds[i], knownCommitted[i])
+ }
+ b.StopTimer()
+ if profF != nil {
+ pprof.StopCPUProfile()
+ }
+}
+
func BenchmarkTransactionPoolSteadyState(b *testing.B) {
poolSize := 100000
@@ -1432,11 +1539,17 @@ func TestStateProofLogging(t *testing.T) {
lines = append(lines, scanner.Text())
}
fmt.Println(lines[len(lines)-1])
+ // Verify that the StateProofNextRound is added when there are no transactions
+ var int1, nextRound uint64
+ var str1 string
+ partsNext := strings.Split(lines[len(lines)-10], "TransactionsLoopStartTime:")
+ fmt.Sscanf(partsNext[1], "%d, StateProofNextRound:%d, %s", &int1, &nextRound, &str1)
+ require.Equal(t, int(512), int(nextRound))
+
parts := strings.Split(lines[len(lines)-1], "StateProofNextRound:")
// Verify the Metrics is correct
- var nextRound, pWeight, signedWeight, numReveals, posToReveal, txnSize uint64
- var str1 string
+ var pWeight, signedWeight, numReveals, posToReveal, txnSize uint64
fmt.Sscanf(parts[1], "%d, ProvenWeight:%d, SignedWeight:%d, NumReveals:%d, NumPosToReveal:%d, TxnSize:%d\"%s",
&nextRound, &pWeight, &signedWeight, &numReveals, &posToReveal, &txnSize, &str1)
require.Equal(t, uint64(768), nextRound)
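
The benchmark's CPUPROFILE gating, reduced to its essentials: start a CPU profile only when the environment asks for one, and stop it before the process exits. A minimal sketch (here CPUPROFILE holds the output path itself, a simplification of the benchmark's generated filename):

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	if path := os.Getenv("CPUPROFILE"); path != "" {
		f, err := os.Create(path)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(err)
		}
		defer pprof.StopCPUProfile()
	}
	// ... workload under measurement ...
}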
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 0494971b3..e577ea93f 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -584,16 +584,23 @@ Account fields used in the `acct_params_get` opcode.
| `b target` | branch unconditionally to TARGET |
| `return` | use A as success value; end |
| `pop` | discard A |
+| `popn n` | Remove N values from the top of the stack |
| `dup` | duplicate A |
| `dup2` | duplicate A and B |
+| `dupn n` | duplicate A, N times |
| `dig n` | Nth value from the top of the stack. dig 0 is equivalent to dup |
+| `bury n` | Replace the Nth value from the top of the stack. bury 0 fails. |
| `cover n` | remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N. |
| `uncover n` | remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N. |
+| `frame_dig i` | Nth (signed) value from the frame pointer. |
+| `frame_bury i` | Replace the Nth (signed) value from the frame pointer in the stack |
| `swap` | swaps A and B on stack |
| `select` | selects one of two values based on top-of-stack: B if C != 0, else A |
| `assert` | immediately fail unless A is a non-zero number |
| `callsub target` | branch unconditionally to TARGET, saving the next instruction on the call stack |
+| `proto a r` | Prepare top call frame for a retsub that will assume A args and R return values. |
| `retsub` | pop the top instruction from the call stack and branch to it |
+| `switch target ...` | branch to the Ath label. Continue at the following instruction if index A exceeds the number of labels. |
### State Access
@@ -615,7 +622,7 @@ Account fields used in the `acct_params_get` opcode.
| `app_params_get f` | X is field F from app A. Y is 1 if A exists, else 0 |
| `acct_params_get f` | X is field F from account A. Y is 1 if A owns positive algos, else 0 |
| `log` | write A to log state of the current application |
-| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive) |
+| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive) |
### Inner Transactions
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 5fbd310d2..d093c0823 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -610,6 +610,27 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- immediately fail unless A is a non-zero number
- Availability: v3
+## bury n
+
+- Opcode: 0x45 {uint8 depth}
+- Stack: ..., A &rarr; ...
+- Replace the Nth value from the top of the stack. bury 0 fails.
+- Availability: v8
+
+## popn n
+
+- Opcode: 0x46 {uint8 stack depth}
+- Stack: ..., [N items] &rarr; ...
+- Remove N values from the top of the stack
+- Availability: v8
+
+## dupn n
+
+- Opcode: 0x47 {uint8 copy count}
+- Stack: ..., A &rarr; ..., A, [N copies of A]
+- duplicate A, N times
+- Availability: v8
+
## pop
- Opcode: 0x48
@@ -790,7 +811,7 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
## json_ref r
-- Opcode: 0x5f {string return type}
+- Opcode: 0x5f {uint8 return type}
- Stack: ..., A: []byte, B: []byte &rarr; ..., any
- key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A
- **Cost**: 25 + 2 per 7 bytes of A
@@ -1042,7 +1063,7 @@ pushint args are not added to the intcblock during assembly processes
- branch unconditionally to TARGET, saving the next instruction on the call stack
- Availability: v4
-The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
+The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.
## retsub
@@ -1051,7 +1072,37 @@ The call stack is separate from the data stack. Only `callsub` and `retsub` mani
- pop the top instruction from the call stack and branch to it
- Availability: v4
-The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
+If the current frame was prepared by `proto A R`, `retsub` will remove the `A` arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.
+
+## proto a r
+
+- Opcode: 0x8a {uint8 arguments} {uint8 return values}
+- Stack: ... &rarr; ...
+- Prepare top call frame for a retsub that will assume A args and R return values.
+- Availability: v8
+
+Fails unless the last instruction executed was a `callsub`.
+
+## frame_dig i
+
+- Opcode: 0x8b {int8 frame slot}
+- Stack: ... &rarr; ..., any
+- Nth (signed) value from the frame pointer.
+- Availability: v8
+
+## frame_bury i
+
+- Opcode: 0x8c {int8 frame slot}
+- Stack: ..., A &rarr; ...
+- Replace the Nth (signed) value from the frame pointer in the stack
+- Availability: v8
+
+## switch target ...
+
+- Opcode: 0x8d {uint8 branch count} [{int16 branch offset, big-endian}, ...]
+- Stack: ..., A: uint64 &rarr; ...
+- branch to the Ath label. Continue at the following instruction if index A exceeds the number of labels.
+- Availability: v8
## shl
@@ -1401,7 +1452,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- Opcode: 0xd1 {uint8 block field}
- Stack: ..., A: uint64 &rarr; ..., any
-- field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)
+- field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)
- Availability: v7
`block` Fields:
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index e175a1703..fd3c79bf4 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -27,11 +27,12 @@ import (
"errors"
"fmt"
"io"
+ "math"
"sort"
"strconv"
"strings"
- "github.com/algorand/go-algorand/data/abi"
+ "github.com/algorand/avm-abi/abi"
"github.com/algorand/go-algorand/data/basics"
)
@@ -48,10 +49,13 @@ type Writer interface {
type labelReference struct {
sourceLine int
- // position of the opcode start that refers to the label
+ // position of the label reference
position int
label string
+
+ // ending position of the opcode containing the label reference.
+ offsetPosition int
}
type constReference interface {
@@ -224,11 +228,13 @@ type OpStream struct {
intc []uint64 // observed ints in code. We'll put them into a intcblock
intcRefs []intReference // references to int pseudo-op constants, used for optimization
- hasIntcBlock bool // prevent prepending intcblock because asm has one
+ cntIntcBlock int // prevent prepending intcblock because asm has one
+ hasPseudoInt bool // were any `int` pseudo ops used?
bytec [][]byte // observed bytes in code. We'll put them into a bytecblock
bytecRefs []byteReference // references to byte/addr pseudo-op constants, used for optimization
- hasBytecBlock bool // prevent prepending bytecblock because asm has one
+ cntBytecBlock int // prevent prepending bytecblock because asm has one
+ hasPseudoByte bool // were any `byte` (or equivalent) pseudo ops used?
// tracks information we know to be true at the point being assembled
known ProgramKnowledge
@@ -260,6 +266,7 @@ func newOpStream(version uint64) OpStream {
OffsetToLine: make(map[int]int),
typeTracking: true,
Version: version,
+ known: ProgramKnowledge{fp: -1},
}
for i := range o.known.scratchSpace {
@@ -277,7 +284,7 @@ type ProgramKnowledge struct {
// Return.Types. If `deadcode` is true, `stack` should be empty.
stack StackTypes
- // bottom is the type given out when known is empty. It is StackNone at
+ // bottom is the type given out when `stack` is empty. It is StackNone at
// program start, so, for example, a `+` opcode at the start of a program
// fails. But when a label or callsub is encountered, `stack` is truncated
// and `bottom` becomes StackAny, because we don't track program state
@@ -289,9 +296,24 @@ type ProgramKnowledge struct {
// errors should be reported.
deadcode bool
+ // fp is the frame pointer, if known/usable, or -1 if not. When
+ // encountering a `proto`, `stack` is grown to fit `args`, and this `fp` is
+ // set to the top of those args. This may not be the "real" fp when the
+ // program is actually evaluated, but it is good enough for frame_{dig/bury}
+ // to work from there.
+ fp int
+
scratchSpace [256]StackType
}
+func (pgm *ProgramKnowledge) top() (StackType, bool) {
+ if len(pgm.stack) == 0 {
+ return pgm.bottom, pgm.bottom != StackNone
+ }
+ last := len(pgm.stack) - 1
+ return pgm.stack[last], true
+}
+
func (pgm *ProgramKnowledge) pop() StackType {
if len(pgm.stack) == 0 {
return pgm.bottom
@@ -322,6 +344,7 @@ func (pgm *ProgramKnowledge) label() {
func (pgm *ProgramKnowledge) reset() {
pgm.stack = nil
pgm.bottom = StackAny
+ pgm.fp = -1
pgm.deadcode = false
for i := range pgm.scratchSpace {
pgm.scratchSpace[i] = StackAny
@@ -344,11 +367,11 @@ func (ops *OpStream) recordSourceLine() {
}
// referToLabel records an opcode label reference to resolve later
-func (ops *OpStream) referToLabel(pc int, label string) {
- ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
+func (ops *OpStream) referToLabel(pc int, label string, offsetPosition int) {
+ ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label, offsetPosition})
}
-type refineFunc func(pgm *ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
+type refineFunc func(pgm *ProgramKnowledge, immediates []string) (StackTypes, StackTypes, error)
// returns allows opcodes like `txn` to be specific about their return value
// types, based on the field requested, rather than use Any as specified by
@@ -373,18 +396,18 @@ func (ops *OpStream) returns(spec *OpSpec, replacement StackType) {
func (ops *OpStream) Intc(constIndex uint) {
switch constIndex {
case 0:
- ops.pending.WriteByte(0x22) // intc_0
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_0"].Opcode)
case 1:
- ops.pending.WriteByte(0x23) // intc_1
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_1"].Opcode)
case 2:
- ops.pending.WriteByte(0x24) // intc_2
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_2"].Opcode)
case 3:
- ops.pending.WriteByte(0x25) // intc_3
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_3"].Opcode)
default:
if constIndex > 0xff {
ops.error("cannot have more than 256 int constants")
}
- ops.pending.WriteByte(0x21) // intc
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc"].Opcode)
ops.pending.WriteByte(uint8(constIndex))
}
if constIndex >= uint(len(ops.intc)) {
@@ -394,8 +417,10 @@ func (ops *OpStream) Intc(constIndex uint) {
}
}
-// Uint writes opcodes for loading a uint literal
-func (ops *OpStream) Uint(val uint64) {
+// IntLiteral writes opcodes for loading a uint literal
+func (ops *OpStream) IntLiteral(val uint64) {
+ ops.hasPseudoInt = true
+
found := false
var constIndex uint
for i, cv := range ops.intc {
@@ -405,7 +430,11 @@ func (ops *OpStream) Uint(val uint64) {
break
}
}
+
if !found {
+ if ops.cntIntcBlock > 0 {
+ ops.errorf("int %d used without %d in intcblock", val, val)
+ }
constIndex = uint(len(ops.intc))
ops.intc = append(ops.intc, val)
}
@@ -420,18 +449,18 @@ func (ops *OpStream) Uint(val uint64) {
func (ops *OpStream) Bytec(constIndex uint) {
switch constIndex {
case 0:
- ops.pending.WriteByte(0x28) // bytec_0
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_0"].Opcode)
case 1:
- ops.pending.WriteByte(0x29) // bytec_1
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_1"].Opcode)
case 2:
- ops.pending.WriteByte(0x2a) // bytec_2
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_2"].Opcode)
case 3:
- ops.pending.WriteByte(0x2b) // bytec_3
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_3"].Opcode)
default:
if constIndex > 0xff {
ops.error("cannot have more than 256 byte constants")
}
- ops.pending.WriteByte(0x27) // bytec
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec"].Opcode)
ops.pending.WriteByte(uint8(constIndex))
}
if constIndex >= uint(len(ops.bytec)) {
@@ -444,6 +473,8 @@ func (ops *OpStream) Bytec(constIndex uint) {
// ByteLiteral writes opcodes and data for loading a []byte literal
// Values are accumulated so that they can be put into a bytecblock
func (ops *OpStream) ByteLiteral(val []byte) {
+ ops.hasPseudoByte = true
+
found := false
var constIndex uint
for i, cv := range ops.bytec {
@@ -454,6 +485,9 @@ func (ops *OpStream) ByteLiteral(val []byte) {
}
}
if !found {
+ if ops.cntBytecBlock > 0 {
+ ops.errorf("byte/addr/method used without value in bytecblock")
+ }
constIndex = uint(len(ops.bytec))
ops.bytec = append(ops.bytec, val)
}
@@ -468,23 +502,46 @@ func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("int needs one argument")
}
+
+ // After backBranchEnabledVersion, control flow is confusing, so if there's
+ // a manual cblock, use push instead of trying to use what's given.
+ if ops.cntIntcBlock > 0 && ops.Version >= backBranchEnabledVersion {
+ // We don't understand control-flow, so use pushint
+ ops.warnf("int %s used with explicit intcblock. must pushint", args[0])
+ pushint := OpsByName[ops.Version]["pushint"]
+ return asmPushInt(ops, &pushint, args)
+ }
+
+ // There are no backjumps, but there are multiple cblocks. Maybe one is
+	// conditionally skipped. Too confusing.
+ if ops.cntIntcBlock > 1 {
+ pushint, ok := OpsByName[ops.Version]["pushint"]
+ if ok {
+ return asmPushInt(ops, &pushint, args)
+ }
+ return ops.errorf("int %s used with manual intcblocks. Use intc.", args[0])
+ }
+
+ // In both of the above clauses, we _could_ track whether a particular
+ // intcblock dominates the current instruction. If so, we could use it.
+
// check txn type constants
i, ok := txnTypeMap[args[0]]
if ok {
- ops.Uint(i)
+ ops.IntLiteral(i)
return nil
}
- // check OnCompetion constants
+ // check OnCompletion constants
oc, isOCStr := onCompletionMap[args[0]]
if isOCStr {
- ops.Uint(oc)
+ ops.IntLiteral(oc)
return nil
}
val, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
return ops.error(err)
}
- ops.Uint(val)
+ ops.IntLiteral(val)
return nil
}
@@ -493,7 +550,7 @@ func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("intc operation needs one argument")
}
- constIndex, err := simpleImm(args[0], "constant")
+ constIndex, err := byteImm(args[0], "constant")
if err != nil {
return ops.error(err)
}
@@ -504,7 +561,7 @@ func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("bytec operation needs one argument")
}
- constIndex, err := simpleImm(args[0], "constant")
+ constIndex, err := byteImm(args[0], "constant")
if err != nil {
return ops.error(err)
}
@@ -545,7 +602,7 @@ func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func base32DecdodeAnyPadding(x string) (val []byte, err error) {
+func base32DecodeAnyPadding(x string) (val []byte, err error) {
val, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(x)
if err != nil {
// try again with standard padding
@@ -567,7 +624,7 @@ func parseBinaryArgs(args []string) (val []byte, consumed int, err error) {
err = errors.New("byte base32 arg lacks close paren")
return
}
- val, err = base32DecdodeAnyPadding(arg[open+1 : close])
+ val, err = base32DecodeAnyPadding(arg[open+1 : close])
if err != nil {
return
}
@@ -595,7 +652,7 @@ func parseBinaryArgs(args []string) (val []byte, consumed int, err error) {
err = fmt.Errorf("need literal after 'byte %s'", arg)
return
}
- val, err = base32DecdodeAnyPadding(args[1])
+ val, err = base32DecodeAnyPadding(args[1])
if err != nil {
return
}
@@ -696,6 +753,29 @@ func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
+
+ // After backBranchEnabledVersion, control flow is confusing, so if there's
+ // a manual cblock, use push instead of trying to use what's given.
+ if ops.cntBytecBlock > 0 && ops.Version >= backBranchEnabledVersion {
+ // We don't understand control-flow, so use pushbytes
+ ops.warnf("byte %s used with explicit bytecblock. must pushbytes", args[0])
+ pushbytes := OpsByName[ops.Version]["pushbytes"]
+ return asmPushBytes(ops, &pushbytes, args)
+ }
+
+ // There are no backjumps, but there are multiple cblocks. Maybe one is
+	// conditionally skipped. Too confusing.
+ if ops.cntBytecBlock > 1 {
+ pushbytes, ok := OpsByName[ops.Version]["pushbytes"]
+ if ok {
+ return asmPushBytes(ops, &pushbytes, args)
+ }
+ return ops.errorf("byte %s used with manual bytecblocks. Use bytec.", args[0])
+ }
+
+ // In both of the above clauses, we _could_ track whether a particular
+ // bytecblock dominates the current instruction. If so, we could use it.
+
val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
@@ -723,7 +803,7 @@ func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
if err != nil {
// Warn if an invalid signature is used. Don't return an error, since the ABI is not
// governed by the core protocol, so there may be changes to it that we don't know about
- ops.warnf("Invalid ARC-4 ABI method signature for method op: %s", err.Error()) // nolint:errcheck
+ ops.warnf("Invalid ARC-4 ABI method signature for method op: %s", err.Error())
}
hash := sha512.Sum512_256(methodSig)
ops.ByteLiteral(hash[0:4])
@@ -734,11 +814,10 @@ func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.WriteByte(spec.Opcode)
+ ivals := make([]uint64, len(args))
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
ops.pending.Write(scratch[:l])
- ops.intcRefs = nil
- ops.intc = make([]uint64, len(args))
for i, xs := range args {
cu, err := strconv.ParseUint(xs, 0, 64)
if err != nil {
@@ -746,9 +825,21 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
}
l = binary.PutUvarint(scratch[:], cu)
ops.pending.Write(scratch[:l])
- ops.intc[i] = cu
+ if !ops.known.deadcode {
+ ivals[i] = cu
+ }
+ }
+ if !ops.known.deadcode {
+ // If we previously processed an `int`, we thought we could insert our
+ // own intcblock, but now we see a manual one.
+ if ops.hasPseudoInt {
+ ops.error("intcblock following int")
+ }
+ ops.intcRefs = nil
+ ops.intc = ivals
+ ops.cntIntcBlock++
}
- ops.hasIntcBlock = true
+
return nil
}
@@ -763,8 +854,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// intcblock, but parseBinaryArgs would have
// to return a useful consumed value even in
// the face of errors. Hard.
- ops.error(err)
- return nil
+ return ops.error(err)
}
bvals = append(bvals, val)
rest = rest[consumed:]
@@ -777,9 +867,16 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.Write(scratch[:l])
ops.pending.Write(bv)
}
- ops.bytecRefs = nil
- ops.bytec = bvals
- ops.hasBytecBlock = true
+ if !ops.known.deadcode {
+ // If we previously processed a pseudo `byte`, we thought we could
+ // insert our own bytecblock, but now we see a manual one.
+ if ops.hasPseudoByte {
+ ops.error("bytecblock following byte/addr/method")
+ }
+ ops.bytecRefs = nil
+ ops.bytec = bvals
+ ops.cntBytecBlock++
+ }
return nil
}
@@ -801,7 +898,7 @@ func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("arg operation needs one argument")
}
- val, err := simpleImm(args[0], "argument")
+ val, err := byteImm(args[0], "argument")
if err != nil {
return ops.error(err)
}
@@ -827,7 +924,7 @@ func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
return ops.error("branch operation needs label argument")
}
- ops.referToLabel(ops.pending.Len(), args[0])
+ ops.referToLabel(ops.pending.Len()+1, args[0], ops.pending.Len()+spec.Size)
ops.pending.WriteByte(spec.Opcode)
// zero bytes will get replaced with actual offset in resolveLabels()
ops.pending.WriteByte(0)
@@ -835,6 +932,23 @@ func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
+func asmSwitch(ops *OpStream, spec *OpSpec, args []string) error {
+ numOffsets := len(args)
+ if numOffsets > math.MaxUint8 {
+ return ops.errorf("%s cannot take more than 255 labels", spec.Name)
+ }
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(byte(numOffsets))
+ opEndPos := ops.pending.Len() + 2*numOffsets
+ for _, arg := range args {
+ ops.referToLabel(ops.pending.Len(), arg, opEndPos)
+ // zero bytes will get replaced with actual offset in resolveLabels()
+ ops.pending.WriteByte(0)
+ ops.pending.WriteByte(0)
+ }
+ return nil
+}
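The extra `offsetPosition` carried by `referToLabel` matters here: every `switch` offset is relative to the end of the whole instruction, not to its own two bytes. A sketch of the patching step these references feed into (mirroring `resolveLabels` further down; `raw` stands for the assembled program bytes):

```go
// patchOffset writes one label reference as a big-endian int16: the jump is
// measured from the end of the containing instruction (offsetPosition) to
// the label's destination pc, and lands at the reference's own position.
func patchOffset(raw []byte, position, offsetPosition, dest int) {
	jump := dest - offsetPosition
	raw[position] = uint8(jump >> 8)
	raw[position+1] = uint8(jump & 0xff)
}
```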
+
func asmSubstring(ops *OpStream, spec *OpSpec, args []string) error {
err := asmDefault(ops, spec, args)
if err != nil {
@@ -849,7 +963,7 @@ func asmSubstring(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func simpleImm(value string, label string) (byte, error) {
+func byteImm(value string, label string) (byte, error) {
res, err := strconv.ParseUint(value, 0, 64)
if err != nil {
return 0, fmt.Errorf("unable to parse %s %#v as integer", label, value)
@@ -860,6 +974,14 @@ func simpleImm(value string, label string) (byte, error) {
return byte(res), err
}
+func int8Imm(value string, label string) (byte, error) {
+ res, err := strconv.ParseInt(value, 10, 8)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse %s %#v as int8", label, value)
+ }
+ return byte(res), err
+}
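Signed immediates such as `frame_dig -1` occupy a single byte: `int8Imm` narrows through `ParseInt(..., 8)`, and the disassembler (below) widens the byte back with `int8(b)`. A quick, self-contained illustration of that round trip:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	res, _ := strconv.ParseInt("-1", 10, 8) // accepts -128..127
	b := byte(res)                          // stored in the program as 0xff
	fmt.Printf("%#02x -> %d\n", b, int8(b)) // 0xff -> -1
}
```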
+
func asmItxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 1 {
return asmDefault(ops, spec, args)
@@ -923,7 +1045,7 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
if imm.Group != nil {
fs, ok := imm.Group.SpecByName(args[i])
if !ok {
- _, err := simpleImm(args[i], "")
+ _, err := byteImm(args[i], "")
if err == nil {
// User supplied a uint, so we see if any of the other immediates take uints
for j, otherImm := range spec.OpDetails.Immediates {
@@ -966,7 +1088,7 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.WriteByte(fs.Field())
} else {
// simple immediate that must be a number from 0-255
- val, err := simpleImm(args[i], imm.Name)
+ val, err := byteImm(args[i], imm.Name)
if err != nil {
if strings.Contains(err.Error(), "unable to parse") {
// Perhaps the field works in a different order
@@ -989,6 +1111,12 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
}
ops.pending.WriteByte(val)
}
+ case immInt8:
+ val, err := int8Imm(args[i], imm.Name)
+ if err != nil {
+ return ops.errorf("%s %w", spec.Name, err)
+ }
+ ops.pending.WriteByte(val)
default:
return ops.errorf("unable to assemble immKind %d", imm.kind)
}
@@ -996,72 +1124,169 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-// Interprets the arg at index argIndex as byte-long immediate
-func getByteImm(args []string, argIndex int) (byte, bool) {
+// getImm interprets the arg at index argIndex as an immediate
+func getImm(args []string, argIndex int) (int, bool) {
if len(args) <= argIndex {
return 0, false
}
- n, err := strconv.ParseUint(args[argIndex], 0, 8)
+ // We want to parse anything from -128 up to 255. So allow 9 bits.
+	// Normal assembly checking will catch a signed value used as a byte, and vice versa.
+ n, err := strconv.ParseInt(args[argIndex], 0, 9)
if err != nil {
return 0, false
}
- return byte(n), true
+ return int(n), true
}
-func typeSwap(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- topTwo := StackTypes{StackAny, StackAny}
+func anyTypes(n int) StackTypes {
+ as := make(StackTypes, n)
+ for i := range as {
+ as[i] = StackAny
+ }
+ return as
+}
+
+func typeSwap(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ swapped := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
- topTwo[1] = pgm.stack[top]
+ swapped[0] = pgm.stack[top]
if top >= 1 {
- topTwo[0] = pgm.stack[top-1]
+ swapped[1] = pgm.stack[top-1]
}
}
- reversed := StackTypes{topTwo[1], topTwo[0]}
- return nil, reversed
+ return nil, swapped, nil
}
-func typeDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- n, ok := getByteImm(args, 0)
+func typeDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
if !ok {
- return nil, nil
+ return nil, nil, nil
}
- depth := int(n) + 1
- anys := make(StackTypes, depth)
- returns := make(StackTypes, depth+1)
- for i := range anys {
- anys[i] = StackAny
- returns[i] = StackAny
- }
- returns[depth] = StackAny
+ depth := n + 1
+ returns := anyTypes(depth + 1)
idx := len(pgm.stack) - depth
if idx >= 0 {
+		// We return exactly what's on the stack...
+ copy(returns[:], pgm.stack[idx:])
+ // plus a repeat of what was at idx
returns[len(returns)-1] = pgm.stack[idx]
- for i := idx; i < len(pgm.stack); i++ {
- returns[i-idx] = pgm.stack[i]
+ }
+ return anyTypes(depth), returns, nil
+}
+
+func typeBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
+ if !ok {
+ return nil, nil, nil
+ }
+ if n == 0 {
+ return nil, nil, errors.New("bury 0 always fails")
+ }
+
+ top := len(pgm.stack) - 1
+ typ, ok := pgm.top()
+ if !ok {
+ return nil, nil, nil // Will error because bury demands a stack arg
+ }
+
+ idx := top - n
+ if idx < 0 {
+ if pgm.bottom == StackNone {
+ // By demanding n+1 elements, we'll trigger an error
+ return anyTypes(n + 1), nil, nil
}
+ // We're going to bury below the tracked portion of the stack, so there's
+ // nothing to update.
+ return nil, nil, nil
+ }
+
+ returns := make(StackTypes, n)
+ copy(returns, pgm.stack[idx:]) // Won't have room to copy the top type
+ returns[0] = typ // Replace the bottom with the top type
+ return pgm.stack[idx:], returns, nil
+}
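For reference, the stack effect this models: `bury n` replaces the value n deep with the top of stack and then pops the top. A tiny simulation (top of stack is the last element):

```go
// bury overwrites the element n below the top with the top value, then pops.
// bury([]string{"x", "y", "z"}, 2) returns [z y]: "x" is overwritten by "z".
func bury(stack []string, n int) []string {
	top := len(stack) - 1
	stack[top-n] = stack[top]
	return stack[:top]
}
```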
+
+func typeFrameDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
+ if !ok {
+ return nil, nil, nil
+ }
+ // If we have no frame pointer, we can't do better than "any"
+ if pgm.fp == -1 {
+ return nil, nil, nil
+ }
+
+ // If we do have a framepointer, we can try to get the type
+ idx := pgm.fp + n
+ if idx < 0 {
+ return nil, nil, fmt.Errorf("frame_dig %d in sub with %d args", n, pgm.fp)
+ }
+ if idx >= len(pgm.stack) {
+ return nil, nil, fmt.Errorf("frame_dig above stack")
+ }
+ return nil, StackTypes{pgm.stack[idx]}, nil
+}
+
+func typeFrameBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
+ if !ok {
+ return nil, nil, nil
+ }
+
+ top := len(pgm.stack) - 1
+ typ, ok := pgm.top()
+ if !ok {
+		return nil, nil, nil // Will error because frame_bury demands a stack arg
+ }
+
+ // If we have no frame pointer, we have to wipe out any belief that the
+ // stack contains anything but the supplied type.
+ if pgm.fp == -1 {
+ // Perhaps it would be cleaner to build up the args, return slices to
+ // cause this, rather than manipulate the pgm.stack directly.
+ for i := range pgm.stack {
+ if pgm.stack[i] != typ {
+ pgm.stack[i] = StackAny
+ }
+ }
+ return nil, nil, nil
+ }
+
+ // If we do have a framepointer, we can try to update the typestack
+ idx := pgm.fp + n
+ if idx < 0 {
+ return nil, nil, fmt.Errorf("frame_bury %d in sub with %d args", n, pgm.fp)
+ }
+ if idx >= top {
+ return nil, nil, fmt.Errorf("frame_bury above stack")
}
- return anys, returns
+ depth := top - idx
+
+ returns := make(StackTypes, depth)
+ copy(returns, pgm.stack[idx:]) // Won't have room to copy the top type
+ returns[0] = typ // Replace the bottom with the top type
+ return pgm.stack[idx:], returns, nil
}
-func typeEquals(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeEquals(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 0 {
//Require arg0 and arg1 to have same type
- return StackTypes{pgm.stack[top], pgm.stack[top]}, nil
+ return StackTypes{pgm.stack[top], pgm.stack[top]}, nil, nil
}
- return nil, nil
+ return nil, nil, nil
}
-func typeDup(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeDup(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 0 {
- return StackTypes{pgm.stack[top]}, StackTypes{pgm.stack[top], pgm.stack[top]}
+ return nil, StackTypes{pgm.stack[top], pgm.stack[top]}, nil
}
- return nil, nil
+ return nil, nil, nil
}
-func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
topTwo := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -1070,41 +1295,35 @@ func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
topTwo[0] = pgm.stack[top-1]
}
}
- return nil, append(topTwo, topTwo...)
+ return nil, append(topTwo, topTwo...), nil
}
-func typeSelect(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeSelect(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 2 {
if pgm.stack[top-1] == pgm.stack[top-2] {
- return nil, StackTypes{pgm.stack[top-1]}
+ return nil, StackTypes{pgm.stack[top-1]}, nil
}
}
- return nil, nil
+ return nil, nil, nil
}
-func typeSetBit(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeSetBit(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 2 {
- return nil, StackTypes{pgm.stack[top-2]}
+ return nil, StackTypes{pgm.stack[top-2]}, nil
}
- return nil, nil
+ return nil, nil, nil
}
-func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- n, ok := getByteImm(args, 0)
+func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
if !ok {
- return nil, nil
+ return nil, nil, nil
}
depth := int(n) + 1
- anys := make(StackTypes, depth)
- for i := range anys {
- anys[i] = StackAny
- }
- returns := make(StackTypes, depth)
- for i := range returns {
- returns[i] = StackAny
- }
+ returns := anyTypes(depth)
+
idx := len(pgm.stack) - depth
// This rotates all the types if idx is >= 0. But there's a potential
// improvement: when pgm.bottom is StackAny, and the cover is going "under"
@@ -1116,23 +1335,16 @@ func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
returns[i-idx+1] = pgm.stack[i]
}
}
- return anys, returns
+ return anyTypes(depth), returns, nil
}
-func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- n, ok := getByteImm(args, 0)
+func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
if !ok {
- return nil, nil
- }
- depth := int(n) + 1
- anys := make(StackTypes, depth)
- for i := range anys {
- anys[i] = StackAny
- }
- returns := make(StackTypes, depth)
- for i := range returns {
- returns[i] = StackAny
+ return nil, nil, nil
}
+ depth := n + 1
+ returns := anyTypes(depth)
idx := len(pgm.stack) - depth
// See precision comment in typeCover
if idx >= 0 {
@@ -1141,36 +1353,36 @@ func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes)
returns[i-idx-1] = pgm.stack[i]
}
}
- return anys, returns
+ return anyTypes(depth), returns, nil
}
-func typeTxField(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeTxField(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
if len(args) != 1 {
- return nil, nil
+ return nil, nil, nil
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return nil, nil
+ return nil, nil, nil
}
- return StackTypes{fs.ftype}, nil
+ return StackTypes{fs.ftype}, nil, nil
}
-func typeStore(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- scratchIndex, ok := getByteImm(args, 0)
+func typeStore(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ scratchIndex, ok := getImm(args, 0)
if !ok {
- return nil, nil
+ return nil, nil, nil
}
top := len(pgm.stack) - 1
if top >= 0 {
pgm.scratchSpace[scratchIndex] = pgm.stack[top]
}
- return nil, nil
+ return nil, nil, nil
}
-func typeStores(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeStores(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top < 0 {
- return nil, nil
+ return nil, nil, nil
}
for i := range pgm.scratchSpace {
// We can't know what slot stacktop is being stored in, but we can at least keep the slots that are the same type as stacktop
@@ -1178,26 +1390,68 @@ func typeStores(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
pgm.scratchSpace[i] = StackAny
}
}
- return nil, nil
+ return nil, nil, nil
}
-func typeLoad(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- scratchIndex, ok := getByteImm(args, 0)
+func typeLoad(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ scratchIndex, ok := getImm(args, 0)
if !ok {
- return nil, nil
+ return nil, nil, nil
}
- return nil, StackTypes{pgm.scratchSpace[scratchIndex]}
+ return nil, StackTypes{pgm.scratchSpace[scratchIndex]}, nil
}
-func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeProto(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ a, aok := getImm(args, 0)
+ _, rok := getImm(args, 1)
+ if !aok || !rok {
+ return nil, nil, nil
+ }
+
+ if len(pgm.stack) != 0 || pgm.bottom != StackAny {
+ return nil, nil, fmt.Errorf("proto must be unreachable from previous PC")
+ }
+ pgm.stack = anyTypes(a)
+ pgm.fp = a
+ return nil, nil, nil
+}
+
+func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
scratchType := pgm.scratchSpace[0]
for _, item := range pgm.scratchSpace {
// If all the scratch slots are one type, then we can say we are loading that type
if item != scratchType {
- return nil, nil
+ return nil, nil, nil
}
}
- return nil, StackTypes{scratchType}
+ return nil, StackTypes{scratchType}, nil
+}
+
+func typePopN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
+ if !ok {
+ return nil, nil, nil
+ }
+ return anyTypes(n), nil, nil
+}
+
+func typeDupN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0)
+ if !ok {
+ return nil, nil, nil
+ }
+ top := len(pgm.stack) - 1
+ if top < 0 {
+ return nil, nil, nil
+ }
+
+ // `dupn 3` ends up with 4 copies of ToS on top
+ copies := make(StackTypes, n+1)
+ for i := range copies {
+ copies[i] = pgm.stack[top]
+ }
+
+ return nil, copies, nil
}
func joinIntsOnOr(singularTerminator string, list ...int) string {
@@ -1399,25 +1653,29 @@ func typecheck(expected, got StackType) bool {
return expected == got
}
-var spaces = [256]uint8{'\t': 1, ' ': 1}
+// newline is not included, since it is handled by the scanner
+var tokenSeparators = [256]bool{'\t': true, ' ': true, ';': true}
-func fieldsFromLine(line string) []string {
- var fields []string
+func tokensFromLine(line string) []string {
+ var tokens []string
i := 0
- for i < len(line) && spaces[line[i]] != 0 {
+ for i < len(line) && tokenSeparators[line[i]] {
+ if line[i] == ';' {
+ tokens = append(tokens, ";")
+ }
i++
}
start := i
- inString := false
- inBase64 := false
+ inString := false // tracked to allow spaces and comments inside
+ inBase64 := false // tracked to allow '//' inside
for i < len(line) {
- if spaces[line[i]] == 0 { // if not space
+		if !tokenSeparators[line[i]] { // if not a separator
switch line[i] {
case '"': // is a string literal?
if !inString {
- if i == 0 || i > 0 && spaces[line[i-1]] != 0 {
+ if i == 0 || i > 0 && tokenSeparators[line[i-1]] {
inString = true
}
} else {
@@ -1428,9 +1686,9 @@ func fieldsFromLine(line string) []string {
case '/': // is a comment?
if i < len(line)-1 && line[i+1] == '/' && !inBase64 && !inString {
if start != i { // if a comment without whitespace
- fields = append(fields, line[start:i])
+ tokens = append(tokens, line[start:i])
}
- return fields
+ return tokens
}
case '(': // is base64( seq?
prefix := line[start:i]
@@ -1446,19 +1704,29 @@ func fieldsFromLine(line string) []string {
i++
continue
}
+
+		// we've hit a separator; end the last token unless inString
+
if !inString {
- field := line[start:i]
- fields = append(fields, field)
- if field == "base64" || field == "b64" {
- inBase64 = true
- } else if inBase64 {
+ token := line[start:i]
+ tokens = append(tokens, token)
+ if line[i] == ';' {
+ tokens = append(tokens, ";")
+ }
+ if inBase64 {
inBase64 = false
+ } else if token == "base64" || token == "b64" {
+ inBase64 = true
}
}
i++
+ // gobble up consecutive whitespace (but notice semis)
if !inString {
- for i < len(line) && spaces[line[i]] != 0 {
+ for i < len(line) && tokenSeparators[line[i]] {
+ if line[i] == ';' {
+ tokens = append(tokens, ";")
+ }
i++
}
start = i
@@ -1467,10 +1735,10 @@ func fieldsFromLine(line string) []string {
// add rest of the string if any
if start < len(line) {
- fields = append(fields, line[start:i])
+ tokens = append(tokens, line[start:i])
}
- return fields
+ return tokens
}
func (ops *OpStream) trace(format string, args ...interface{}) {
@@ -1480,9 +1748,9 @@ func (ops *OpStream) trace(format string, args ...interface{}) {
fmt.Fprintf(ops.Trace, format, args...)
}
-func (ops *OpStream) typeError(err error) {
+func (ops *OpStream) typeErrorf(format string, args ...interface{}) {
if ops.typeTracking {
- ops.error(err)
+ ops.errorf(format, args...)
}
}
@@ -1494,9 +1762,8 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
}
argcount := len(args)
if argcount > len(ops.known.stack) && ops.known.bottom == StackNone {
- err := fmt.Errorf("%s expects %d stack arguments but stack height is %d",
+ ops.typeErrorf("%s expects %d stack arguments but stack height is %d",
strings.Join(instruction, " "), argcount, len(ops.known.stack))
- ops.typeError(err)
} else {
firstPop := true
for i := argcount - 1; i >= 0; i-- {
@@ -1509,9 +1776,8 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
ops.trace(", %s", argType)
}
if !typecheck(argType, stype) {
- err := fmt.Errorf("%s arg %d wanted type %s got %s",
+ ops.typeErrorf("%s arg %d wanted type %s got %s",
strings.Join(instruction, " "), i, argType, stype)
- ops.typeError(err)
}
}
if !firstPop {
@@ -1531,6 +1797,16 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
}
}
+// splitTokens breaks tokens into two slices at the first semicolon.
+func splitTokens(tokens []string) (current, rest []string) {
+ for i, token := range tokens {
+ if token == ";" {
+ return tokens[:i], tokens[i+1:]
+ }
+ }
+ return tokens, nil
+}
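Together with the semicolon tokens emitted by `tokensFromLine`, this is what lets one physical line carry several statements. A hedged usage sketch inside this package:

```go
tokens := tokensFromLine("int 1; int 2; +") // [int 1 ; int 2 ; +]
for current, next := splitTokens(tokens); len(current) > 0 || len(next) > 0; current, next = splitTokens(next) {
	fmt.Println(current) // [int 1], then [int 2], then [+]
}
```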
+
// assemble reads text from an input and accumulates the program
func (ops *OpStream) assemble(text string) error {
fin := strings.NewReader(text)
@@ -1541,74 +1817,85 @@ func (ops *OpStream) assemble(text string) error {
for scanner.Scan() {
ops.sourceLine++
line := scanner.Text()
- line = strings.TrimSpace(line)
- if len(line) == 0 {
- ops.trace("%3d: 0 line\n", ops.sourceLine)
- continue
- }
- if strings.HasPrefix(line, "//") {
- ops.trace("%3d: // line\n", ops.sourceLine)
- continue
- }
- if strings.HasPrefix(line, "#pragma") {
- ops.trace("%3d: #pragma line\n", ops.sourceLine)
- ops.pragma(line)
- continue
- }
- fields := fieldsFromLine(line)
- if len(fields) == 0 {
- ops.trace("%3d: no fields\n", ops.sourceLine)
- continue
- }
- // we're about to begin processing opcodes, so settle the Version
- if ops.Version == assemblerNoVersion {
- ops.Version = AssemblerDefaultVersion
- }
- if ops.versionedPseudoOps == nil {
- ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version)
- }
- opstring := fields[0]
- if opstring[len(opstring)-1] == ':' {
- ops.createLabel(opstring[:len(opstring)-1])
- fields = fields[1:]
- if len(fields) == 0 {
- ops.trace("%3d: label only\n", ops.sourceLine)
+ tokens := tokensFromLine(line)
+ if len(tokens) > 0 {
+ if first := tokens[0]; first[0] == '#' {
+ directive := first[1:]
+ switch directive {
+ case "pragma":
+ ops.pragma(tokens) //nolint:errcheck // report bad pragma line error, but continue assembling
+ ops.trace("%3d: #pragma line\n", ops.sourceLine)
+ default:
+ ops.errorf("Unknown directive: %s", directive)
+ }
continue
}
- opstring = fields[0]
}
- spec, expandedName, ok := getSpec(ops, opstring, fields[1:])
- if ok {
- ops.trace("%3d: %s\t", ops.sourceLine, opstring)
- ops.recordSourceLine()
- if spec.Modes == modeApp {
- ops.HasStatefulOps = true
+ for current, next := splitTokens(tokens); len(current) > 0 || len(next) > 0; current, next = splitTokens(next) {
+ if len(current) == 0 {
+ continue
}
- args, returns := spec.Arg.Types, spec.Return.Types
- if spec.refine != nil {
- nargs, nreturns := spec.refine(&ops.known, fields[1:])
- if nargs != nil {
- args = nargs
- }
- if nreturns != nil {
- returns = nreturns
- }
+ // we're about to begin processing opcodes, so settle the Version
+ if ops.Version == assemblerNoVersion {
+ ops.Version = AssemblerDefaultVersion
+ }
+ if ops.versionedPseudoOps == nil {
+ ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version)
}
- ops.trackStack(args, returns, append([]string{expandedName}, fields[1:]...))
- spec.asm(ops, &spec, fields[1:])
- if spec.deadens() { // An unconditional branch deadens the following code
- ops.known.deaden()
+ opstring := current[0]
+ if opstring[len(opstring)-1] == ':' {
+ ops.createLabel(opstring[:len(opstring)-1])
+ current = current[1:]
+ if len(current) == 0 {
+ ops.trace("%3d: label only\n", ops.sourceLine)
+ continue
+ }
+ opstring = current[0]
}
- if spec.Name == "callsub" {
- // since retsub comes back to the callsub, it is an entry point like a label
- ops.known.label()
+ spec, expandedName, ok := getSpec(ops, opstring, current[1:])
+ if ok {
+ ops.trace("%3d: %s\t", ops.sourceLine, opstring)
+ ops.recordSourceLine()
+ if spec.Modes == modeApp {
+ ops.HasStatefulOps = true
+ }
+ args, returns := spec.Arg.Types, spec.Return.Types
+ if spec.refine != nil {
+ nargs, nreturns, err := spec.refine(&ops.known, current[1:])
+ if err != nil {
+ ops.typeErrorf("%w", err)
+ }
+ if nargs != nil {
+ args = nargs
+ }
+ if nreturns != nil {
+ returns = nreturns
+ }
+ }
+ ops.trackStack(args, returns, append([]string{expandedName}, current[1:]...))
+ spec.asm(ops, &spec, current[1:]) //nolint:errcheck // ignore error and continue, to collect more errors
+
+ if spec.deadens() { // An unconditional branch deadens the following code
+ ops.known.deaden()
+ }
+ if spec.Name == "callsub" {
+ // since retsub comes back to the callsub, it is an entry point like a label
+ ops.known.label()
+ }
}
ops.trace("\n")
continue
}
}
- // backward compatibility: do not allow jumps behind last instruction in v1
+ if err := scanner.Err(); err != nil {
+ if errors.Is(err, bufio.ErrTooLong) {
+ err = errors.New("line too long")
+ }
+ ops.error(err)
+ }
+
+ // backward compatibility: do not allow jumps past last instruction in v1
if ops.Version <= 1 {
for label, dest := range ops.labels {
if dest == ops.pending.Len() {
@@ -1635,21 +1922,20 @@ func (ops *OpStream) assemble(text string) error {
return nil
}
-func (ops *OpStream) pragma(line string) error {
- fields := strings.Split(line, " ")
- if fields[0] != "#pragma" {
- return ops.errorf("invalid syntax: %s", fields[0])
+func (ops *OpStream) pragma(tokens []string) error {
+ if tokens[0] != "#pragma" {
+ return ops.errorf("invalid syntax: %s", tokens[0])
}
- if len(fields) < 2 {
+ if len(tokens) < 2 {
return ops.error("empty pragma")
}
- key := fields[1]
+ key := tokens[1]
switch key {
case "version":
- if len(fields) < 3 {
+ if len(tokens) < 3 {
return ops.error("no version value")
}
- value := fields[2]
+ value := tokens[2]
var ver uint64
if ops.pending.Len() > 0 {
return ops.error("#pragma version is only allowed before instructions")
@@ -1674,10 +1960,10 @@ func (ops *OpStream) pragma(line string) error {
}
return nil
case "typetrack":
- if len(fields) < 3 {
+ if len(tokens) < 3 {
return ops.error("no typetrack value")
}
- value := fields[2]
+ value := tokens[2]
on, err := strconv.ParseBool(value)
if err != nil {
return ops.errorf("bad #pragma typetrack: %#v", value)
@@ -1708,19 +1994,20 @@ func (ops *OpStream) resolveLabels() {
reported[lr.label] = true
continue
}
- // all branch instructions (currently) are opcode byte and 2 offset bytes, and the destination is relative to the next pc as if the branch was a no-op
- naturalPc := lr.position + 3
- if ops.Version < backBranchEnabledVersion && dest < naturalPc {
+
+ // All branch targets are encoded as 2 offset bytes. The destination is relative to the end of the
+	// instruction they appear in, which is available in lr.offsetPosition
+ if ops.Version < backBranchEnabledVersion && dest < lr.offsetPosition {
ops.errorf("label %#v is a back reference, back jump support was introduced in v4", lr.label)
continue
}
- jump := dest - naturalPc
+ jump := dest - lr.offsetPosition
if jump > 0x7fff {
ops.errorf("label %#v is too far away", lr.label)
continue
}
- raw[lr.position+1] = uint8(jump >> 8)
- raw[lr.position+2] = uint8(jump & 0x0ff)
+ raw[lr.position] = uint8(jump >> 8)
+ raw[lr.position+1] = uint8(jump & 0x0ff)
}
ops.pending = *bytes.NewBuffer(raw)
ops.sourceLine = saved
@@ -1769,7 +2056,7 @@ func replaceBytes(s []byte, index, originalLen int, newBytes []byte) []byte {
// This function only optimizes constants introduces by the int pseudo-op, not
// preexisting intcblocks in the code.
func (ops *OpStream) optimizeIntcBlock() error {
- if ops.hasIntcBlock {
+ if ops.cntIntcBlock > 0 {
// don't optimize an existing intcblock, only int pseudo-ops
return nil
}
@@ -1812,7 +2099,7 @@ func (ops *OpStream) optimizeIntcBlock() error {
// This function only optimizes constants introduces by the byte or addr
// pseudo-ops, not preexisting bytecblocks in the code.
func (ops *OpStream) optimizeBytecBlock() error {
- if ops.hasBytecBlock {
+ if ops.cntBytecBlock > 0 {
// don't optimize an existing bytecblock, only byte/addr pseudo-ops
return nil
}
@@ -1954,6 +2241,7 @@ func (ops *OpStream) optimizeConstants(refs []constReference, constBlock []inter
for i := range ops.labelReferences {
if ops.labelReferences[i].position > position {
ops.labelReferences[i].position += positionDelta
+ ops.labelReferences[i].offsetPosition += positionDelta
}
}
@@ -1987,8 +2275,8 @@ func (ops *OpStream) prependCBlocks() []byte {
prebytes := bytes.Buffer{}
vlen := binary.PutUvarint(scratch[:], ops.Version)
prebytes.Write(scratch[:vlen])
- if len(ops.intc) > 0 && !ops.hasIntcBlock {
- prebytes.WriteByte(0x20) // intcblock
+ if len(ops.intc) > 0 && ops.cntIntcBlock == 0 {
+ prebytes.WriteByte(OpsByName[ops.Version]["intcblock"].Opcode)
vlen := binary.PutUvarint(scratch[:], uint64(len(ops.intc)))
prebytes.Write(scratch[:vlen])
for _, iv := range ops.intc {
@@ -1996,8 +2284,8 @@ func (ops *OpStream) prependCBlocks() []byte {
prebytes.Write(scratch[:vlen])
}
}
- if len(ops.bytec) > 0 && !ops.hasBytecBlock {
- prebytes.WriteByte(0x26) // bytecblock
+ if len(ops.bytec) > 0 && ops.cntBytecBlock == 0 {
+ prebytes.WriteByte(OpsByName[ops.Version]["bytecblock"].Opcode)
vlen := binary.PutUvarint(scratch[:], uint64(len(ops.bytec)))
prebytes.Write(scratch[:vlen])
for _, bv := range ops.bytec {
@@ -2163,7 +2451,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
for _, imm := range spec.OpDetails.Immediates {
out += " "
switch imm.kind {
- case immByte:
+ case immByte, immInt8:
if pc >= len(dis.program) {
return "", fmt.Errorf("program end while reading immediate %s for %s",
imm.Name, spec.Name)
@@ -2179,7 +2467,11 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
}
out += name
} else {
- out += fmt.Sprintf("%d", b)
+ if imm.kind == immByte {
+ out += fmt.Sprintf("%d", b)
+ } else if imm.kind == immInt8 {
+ out += fmt.Sprintf("%d", int8(b))
+ }
}
if spec.Name == "intc" && int(b) < len(dis.intc) {
out += fmt.Sprintf(" // %d", dis.intc[b])
@@ -2190,11 +2482,8 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
pc++
case immLabel:
- offset := (uint(dis.program[pc]) << 8) | uint(dis.program[pc+1])
- target := int(offset) + pc + 2
- if target > 0xffff {
- target -= 0x10000
- }
+ offset := decodeBranchOffset(dis.program, pc)
+ target := offset + pc + 2
var label string
if dis.numericTargets {
label = fmt.Sprintf("%d", target)
@@ -2235,7 +2524,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
return "", err
}
- dis.intc = append(dis.intc, intc...)
+ dis.intc = intc
for i, iv := range intc {
if i != 0 {
out += " "
@@ -2248,7 +2537,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
if err != nil {
return "", err
}
- dis.bytec = append(dis.bytec, bytec...)
+ dis.bytec = bytec
for i, bv := range bytec {
if i != 0 {
out += " "
@@ -2256,6 +2545,30 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
out += fmt.Sprintf("0x%s", hex.EncodeToString(bv))
}
pc = nextpc
+ case immLabels:
+ targets, nextpc, err := parseSwitch(dis.program, pc)
+ if err != nil {
+ return "", err
+ }
+
+ var labels []string
+ for _, target := range targets {
+ var label string
+ if dis.numericTargets {
+ label = fmt.Sprintf("%d", target)
+ } else {
+ if known, ok := dis.pendingLabels[target]; ok {
+ label = known
+ } else {
+ dis.labelCount++
+ label = fmt.Sprintf("label%d", dis.labelCount)
+ dis.putLabel(label, target)
+ }
+ }
+ labels = append(labels, label)
+ }
+ out += strings.Join(labels, " ")
+ pc = nextpc
default:
return "", fmt.Errorf("unknown immKind %d", imm.kind)
}
@@ -2409,6 +2722,20 @@ func checkByteConstBlock(cx *EvalContext) error {
return nil
}
+func parseSwitch(program []byte, pos int) (targets []int, nextpc int, err error) {
+ numOffsets := int(program[pos])
+ pos++
+ end := pos + 2*numOffsets // end of op: offset is applied to this position
+ for i := 0; i < numOffsets; i++ {
+ offset := decodeBranchOffset(program, pos)
+ target := end + offset
+ targets = append(targets, target)
+ pos += 2
+ }
+ nextpc = pos
+ return
+}
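A worked decode using the helper above: the opcode byte sits at pc 0 and the count at pc 1, so `end` (the first pc after the instruction) is 6, and a 0xfffd offset is an int16 back jump of -3.

```go
program := []byte{0x8d, 0x02, 0x00, 0x03, 0xff, 0xfd} // switch with 2 labels
targets, nextpc, _ := parseSwitch(program, 1)
fmt.Println(targets, nextpc) // [9 3] 6: targets are end+3 and end-3
```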
+
func allPrintableASCII(bytes []byte) bool {
for _, b := range bytes {
if b < 32 || b > 126 {
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 24d9dffd0..1adf9b450 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -17,8 +17,10 @@
package logic
import (
+ "bytes"
"encoding/hex"
"fmt"
+ "regexp"
"strings"
"testing"
@@ -31,7 +33,8 @@ import (
)
// used by TestAssemble and others, see UPDATE PROCEDURE in TestAssemble()
-const v1Nonsense = `err
+const v1Nonsense = `
+err
global MinTxnFee
global MinBalance
global MaxTxnLife
@@ -117,6 +120,8 @@ intc 1
intc 1
!
%
+|
+&
^
~
byte 0x4242
@@ -342,6 +347,7 @@ byte 0x0123456789abcd
dup
dup
ecdsa_pk_recover Secp256k1
+itxn Sender
itxna Logs 3
`
@@ -393,16 +399,30 @@ pushint 1
replace3
`
-const v8Nonsense = v7Nonsense + pairingNonsense
+const switchNonsense = `
+switch_label0:
+pushint 1
+switch switch_label0 switch_label1
+switch_label1:
+pushint 1
+`
-const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
+const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense
+
+const v9Nonsense = v8Nonsense + pairingNonsense
+
+const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
const randomnessCompiled = "81ffff03d101d000"
const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984" +
randomnessCompiled + "800243218001775c0280018881015d"
-const v8Compiled = v7Compiled + pairingCompiled
+const switchCompiled = "81018d02fff800008101"
+
+const v8Compiled = v7Compiled + switchCompiled + frameCompiled
+
+const v9Compiled = v7Compiled + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -413,14 +433,15 @@ var nonsense = map[uint64]string{
6: v6Nonsense,
7: v7Nonsense,
8: v8Nonsense,
+ 9: v9Nonsense,
}
var compiled = map[uint64]string{
- 1: "012008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b1716154000032903494",
- 2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f",
- 3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
- 4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
- 5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03",
+ 1: "012008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b1716154000032903494",
+ 2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f",
+ 3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
+ 4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
+ 5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03",
6: "06" + v6Compiled,
7: "07" + v7Compiled,
8: "08" + v8Compiled,
@@ -453,8 +474,10 @@ func TestAssemble(t *testing.T) {
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
for _, spec := range OpSpecs {
- // Make sure our nonsense covers the ops
- if !strings.Contains(nonsense[v], spec.Name) &&
+ // Make sure our nonsense covers the ops.
+ hasOp, err := regexp.MatchString("\\s"+regexp.QuoteMeta(spec.Name)+"\\s", nonsense[v])
+ require.NoError(t, err)
+ if !hasOp &&
!pseudoOp(spec.Name) && spec.Version <= v {
t.Errorf("v%d nonsense test should contain op %v", v, spec.Name)
}
@@ -465,6 +488,7 @@ func TestAssemble(t *testing.T) {
// time. we must assemble to the same bytes
// this month that we did last month.
expectedBytes, _ := hex.DecodeString(compiled[v])
+ require.NotEmpty(t, expectedBytes)
// the hex is for convenience if the program has been changed. the
// hex string can be copy pasted back in as a new expected result.
require.Equal(t, expectedBytes, ops.Program, hex.EncodeToString(ops.Program))
@@ -479,6 +503,9 @@ var experiments = []uint64{pairingVersion}
// intended to release the opcodes, they should have been removed from
// `experiments`.
func TestExperimental(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
for _, v := range experiments {
// Allows less, so we can push something out, even before vFuture has been updated.
@@ -506,16 +533,21 @@ type Expect struct {
s string
}
-func testMatch(t testing.TB, actual, expected string) bool {
+func testMatch(t testing.TB, actual, expected string) (ok bool) {
+ defer func() {
+ if !ok {
+ t.Logf("'%s' does not match '%s'", actual, expected)
+ }
+ }()
t.Helper()
if strings.HasPrefix(expected, "...") && strings.HasSuffix(expected, "...") {
- return assert.Contains(t, actual, expected[3:len(expected)-3])
+ return strings.Contains(actual, expected[3:len(expected)-3])
} else if strings.HasPrefix(expected, "...") {
- return assert.Contains(t, actual+"^", expected[3:]+"^")
+ return strings.Contains(actual+"^", expected[3:]+"^")
} else if strings.HasSuffix(expected, "...") {
- return assert.Contains(t, "^"+actual, "^"+expected[:len(expected)-3])
+ return strings.Contains("^"+actual, "^"+expected[:len(expected)-3])
} else {
- return assert.Equal(t, expected, actual)
+ return expected == actual
}
}
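The `...` handling above is a tiny pattern language: `...` on both ends is a substring test, a leading `...` anchors the remainder at the end of `actual`, and a trailing `...` anchors it at the start (the `^` sentinel fakes the anchor). A standalone sketch of the same rules, with illustrative names not taken from the diff:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // match mirrors testMatch's rules: "..." on both ends is a substring
    // check; a leading "..." anchors at the end; a trailing "..." at the start.
    func match(actual, expected string) bool {
    	switch {
    	case strings.HasPrefix(expected, "...") && strings.HasSuffix(expected, "..."):
    		return strings.Contains(actual, expected[3:len(expected)-3])
    	case strings.HasPrefix(expected, "..."):
    		return strings.HasSuffix(actual, expected[3:])
    	case strings.HasSuffix(expected, "..."):
    		return strings.HasPrefix(actual, expected[:len(expected)-3])
    	default:
    		return expected == actual
    	}
    }

    func main() {
    	fmt.Println(match("+ arg 0 wanted type uint64", "+ arg 0..."))// true
    	fmt.Println(match("+ arg 0 wanted type uint64", "...uint64")) // true
    	fmt.Println(match("+ arg 0 wanted", "...arg 0..."))           // true
    }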
@@ -552,8 +584,7 @@ func summarize(trace *strings.Builder) string {
func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpStream {
t.Helper()
- program := strings.ReplaceAll(source, ";", "\n")
- ops, err := assembleWithTrace(program, ver)
+ ops, err := assembleWithTrace(source, ver)
if len(expected) == 0 {
if len(ops.Errors) > 0 || err != nil || ops == nil || ops.Program == nil {
t.Log(summarize(ops.Trace))
@@ -567,13 +598,13 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
require.NotNil(t, ops.Program)
// It should always be possible to Disassemble
dis, err := Disassemble(ops.Program)
- require.NoError(t, err, program)
+ require.NoError(t, err, source)
// And, while the disassembly may not match input
// exactly, the assembly of the disassembly should
// give the same bytecode
ops2, err := AssembleStringWithVersion(notrack(dis), ver)
if len(ops2.Errors) > 0 || err != nil || ops2 == nil || ops2.Program == nil {
- t.Log(program)
+ t.Log(source)
t.Log(dis)
}
require.Empty(t, ops2.Errors)
@@ -581,19 +612,19 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
require.Equal(t, ops.Program, ops2.Program)
} else {
if err == nil {
- t.Log(program)
+ t.Log(source)
}
require.Error(t, err)
errors := ops.Errors
for _, exp := range expected {
if exp.l == 0 {
- // line 0 means: "must match all"
+ // line 0 means: "must match some line"
require.Len(t, expected, 1)
- fail := false
+ fail := true
for _, err := range errors {
msg := err.Unwrap().Error()
- if !testMatch(t, msg, exp.s) {
- fail = true
+ if testMatch(t, msg, exp.s) {
+ fail = false
}
}
if fail {
@@ -701,9 +732,9 @@ func TestAssembleGlobal(t *testing.T) {
testProg(t, "global MinTxnFee; int 2; +", AssemblerMaxVersion)
testProg(t, "global ZeroAddress; byte 0x12; concat; len", AssemblerMaxVersion)
testProg(t, "global MinTxnFee; byte 0x12; concat", AssemblerMaxVersion,
- Expect{3, "concat arg 0 wanted type []byte..."})
+ Expect{1, "concat arg 0 wanted type []byte..."})
testProg(t, "int 2; global ZeroAddress; +", AssemblerMaxVersion,
- Expect{3, "+ arg 1 wanted type uint64..."})
+ Expect{1, "+ arg 1 wanted type uint64..."})
}
func TestAssembleDefault(t *testing.T) {
@@ -730,7 +761,7 @@ func TestOpUint(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := newOpStream(v)
- ops.Uint(0xcafebabe)
+ ops.IntLiteral(0xcafebabe)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
@@ -746,9 +777,8 @@ func TestOpUint64(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- t.Parallel()
ops := newOpStream(v)
- ops.Uint(0xcafebabecafebabe)
+ ops.IntLiteral(0xcafebabecafebabe)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
@@ -769,6 +799,7 @@ func TestOpBytes(t *testing.T) {
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
require.Equal(t, mutateProgVersion(v, "0126010661626364656628"), s)
+ testProg(t, "byte 0x7; len", v, Expect{1, "...odd length hex string"})
})
}
}
@@ -876,6 +907,125 @@ func TestAssembleBytesString(t *testing.T) {
}
}
+func TestManualCBlocks(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Despite appearing twice, 500s are pushints because of manual intcblock
+ ops := testProg(t, "intcblock 1; int 500; int 500; ==", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[4], OpsByName[ops.Version]["pushint"].Opcode)
+
+ ops = testProg(t, "intcblock 2 3; intcblock 4 10; int 5", AssemblerMaxVersion)
+ text, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, text, "pushint 5")
+
+ ops = testProg(t, "intcblock 2 3; intcblock 4 10; intc_3", AssemblerMaxVersion)
+ text, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, text, "intc_3\n") // That is, no commented value for intc_3 is shown
+
+	// In old straight-line versions, allow mixing int and intc if the ints all
+	// reference the manual block. Since conditionals do make it possible that
+	// different cblocks could be in effect depending on earlier path choices,
+	// maybe we should not even allow this.
+ checkSame(t, 3,
+ "intcblock 4 5 1; intc_0; intc_2; +; intc_1; ==",
+ "intcblock 4 5 1; int 4; int 1; +; intc_1; ==",
+ "intcblock 4 5 1; intc_0; int 1; +; int 5; ==")
+ checkSame(t, 3,
+ "bytecblock 0x44 0x55 0x4455; bytec_0; bytec_1; concat; bytec_2; ==",
+ "bytecblock 0x44 0x55 0x4455; byte 0x44; bytec_1; concat; byte 0x4455; ==",
+ "bytecblock 0x44 0x55 0x4455; bytec_0; byte 0x55; concat; bytec_2; ==")
+
+ // But complain if they do not
+ testProg(t, "intcblock 4; int 3;", 3, Expect{1, "int 3 used without 3 in intcblock"})
+ testProg(t, "bytecblock 0x44; byte 0x33;", 3, Expect{1, "byte/addr/method used without value in bytecblock"})
+
+ // Or if the ref comes before the constant block, even if they match
+ testProg(t, "int 5; intcblock 4;", 3, Expect{1, "intcblock following int"})
+ testProg(t, "int 4; intcblock 4;", 3, Expect{1, "intcblock following int"})
+ testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 3, Expect{1, "bytecblock following byte/addr/method"})
+
+	// But we can't complain precisely once backjumps are allowed, so we force
+	// compile to push*. (We don't analyze the CFG, so we don't know if we can
+	// use what is in the user-defined block. Perhaps we could special-case a
+	// single cblock at the start of the program.)
+ checkSame(t, 4,
+ "intcblock 4 5 1; int 4; int 1; +; int 5; ==",
+ "intcblock 4 5 1; pushint 4; pushint 1; +; pushint 5; ==")
+ checkSame(t, 4,
+ "bytecblock 0x44 0x55 0x4455; byte 0x44; byte 0x55; concat; byte 0x4455; ==",
+ "bytecblock 0x44 0x55 0x4455; pushbytes 0x44; pushbytes 0x55; concat; pushbytes 0x4455; ==")
+ // Can't switch to push* after the fact.
+ testProg(t, "int 5; intcblock 4;", 4, Expect{1, "intcblock following int"})
+ testProg(t, "int 4; intcblock 4;", 4, Expect{1, "intcblock following int"})
+ testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 4, Expect{1, "bytecblock following byte/addr/method"})
+
+ // Ignore manually added cblocks in deadcode, so they can be added easily to
+ // existing programs. There are proposals to put metadata there.
+ ops = testProg(t, "int 4; int 4; +; int 8; ==; return; intcblock 10", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["intcblock"].Opcode)
+ require.EqualValues(t, ops.Program[3], 4) // <intcblock> 1 4 <intc_0>
+ require.Equal(t, ops.Program[4], OpsByName[ops.Version]["intc_0"].Opcode)
+ ops = testProg(t, "b skip; intcblock 10; skip: int 4; int 4; +; int 8; ==;", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["intcblock"].Opcode)
+ require.EqualValues(t, ops.Program[3], 4)
+
+ ops = testProg(t, "byte 0x44; byte 0x44; concat; len; return; bytecblock 0x11", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["bytecblock"].Opcode)
+ require.EqualValues(t, ops.Program[4], 0x44) // <bytecblock> 1 1 0x44 <bytec_0>
+ require.Equal(t, ops.Program[5], OpsByName[ops.Version]["bytec_0"].Opcode)
+ ops = testProg(t, "b skip; bytecblock 0x11; skip: byte 0x44; byte 0x44; concat; len; int 4; ==", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["bytecblock"].Opcode)
+ require.EqualValues(t, ops.Program[4], 0x44)
+}
+
+func TestManualCBlocksPreBackBranch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+	// For versions before backbranches were enabled, the assembler is willing to
+	// assemble an `int` reference after an intcblock as an intc. It uses the most
+	// recently seen non-deadcode intcblock, so it *could* be wrong.
+ testProg(t, "intcblock 10 20; int 10;", backBranchEnabledVersion-1)
+ // By the same token, assembly complains if that intcblock doesn't have the
+ // constant. In v3, and v3 only, it *could* pushint.
+ testProg(t, "intcblock 10 20; int 30;", backBranchEnabledVersion-1, Expect{1, "int 30 used..."})
+
+ // Since the second intcblock is dead, the `int 10` "sees" the first block, not the second
+ testProg(t, "intcblock 10 20; b skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-1)
+ testProg(t, "intcblock 10 20; b skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-1,
+ Expect{1, "int 3 used..."})
+
+	// Here, the intcblock in effect is unknowable; better to force the user to
+	// use intc (unless pushint is available to save the day).
+
+ // backBranchEnabledVersion-1 contains pushint
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-1)
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-1)
+
+ // backBranchEnabledVersion-2 does not
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-2,
+ Expect{1, "int 10 used with manual intcblocks. Use intc."})
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-2,
+ Expect{1, "int 3 used with manual intcblocks. Use intc."})
+
+ // REPEAT ABOVE, BUT FOR BYTE BLOCKS
+
+ testProg(t, "bytecblock 0x10 0x20; byte 0x10;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; byte 0x30;", backBranchEnabledVersion-1, Expect{1, "byte/addr/method used..."})
+ testProg(t, "bytecblock 0x10 0x20; b skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; b skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-1,
+ Expect{1, "byte/addr/method used..."})
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-2,
+ Expect{1, "byte 0x10 used with manual bytecblocks. Use bytec."})
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-2,
+ Expect{1, "byte 0x03 used with manual bytecblocks. Use bytec."})
+}
+
func TestAssembleOptimizedConstants(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -1111,209 +1261,91 @@ func TestFieldsFromLine(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- line := "op arg"
- fields := fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "arg", fields[1])
-
- line = "op arg // test"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "arg", fields[1])
-
- line = "op base64 ABC//=="
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC//==", fields[2])
-
- line = "op base64 ABC/=="
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC/==", fields[2])
-
- line = "op base64 ABC/== /"
- fields = fieldsFromLine(line)
- require.Equal(t, 4, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC/==", fields[2])
- require.Equal(t, "/", fields[3])
-
- line = "op base64 ABC/== //"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC/==", fields[2])
-
- line = "op base64 ABC//== //"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC//==", fields[2])
-
- line = "op b64 ABC//== //"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "b64", fields[1])
- require.Equal(t, "ABC//==", fields[2])
-
- line = "op b64(ABC//==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "b64(ABC//==)", fields[1])
-
- line = "op base64(ABC//==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64(ABC//==)", fields[1])
-
- line = "op b64(ABC/==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "b64(ABC/==)", fields[1])
-
- line = "op base64(ABC/==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64(ABC/==)", fields[1])
-
- line = "base64(ABC//==)"
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, "base64(ABC//==)", fields[0])
-
- line = "b(ABC//==)"
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, "b(ABC", fields[0])
-
- line = "b(ABC//==) //"
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, "b(ABC", fields[0])
-
- line = "b(ABC ==) //"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "b(ABC", fields[0])
- require.Equal(t, "==)", fields[1])
-
- line = "op base64 ABC)"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC)", fields[2])
-
- line = "op base64 ABC) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC)", fields[2])
-
- line = "op base64 ABC//) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC//)", fields[2])
-
- line = `op "test"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test"`, fields[1])
-
- line = `op "test1 test2"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2" // comment`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2 // not a comment"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2 // not a comment"`, fields[1])
-
- line = `op "test1 test2 // not a comment" // comment`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2 // not a comment"`, fields[1])
-
- line = `op "test1 test2 // not a comment" // comment`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2 // not a comment"`, fields[1])
-
- line = `op "test1 test2" //`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2"//`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2` // non-terminated string literal
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2`, fields[1])
-
- line = `op "test1 test2\"` // non-terminated string literal
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2\"`, fields[1])
-
- line = `op \"test1 test2\"` // not a string literal
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `\"test1`, fields[1])
- require.Equal(t, `test2\"`, fields[2])
-
- line = `"test1 test2"`
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, `"test1 test2"`, fields[0])
-
- line = `\"test1 test2"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, `\"test1`, fields[0])
- require.Equal(t, `test2"`, fields[1])
-
- line = `"" // test`
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, `""`, fields[0])
+ check := func(line string, tokens ...string) {
+ t.Helper()
+ assert.Equal(t, tokensFromLine(line), tokens)
+ }
+
+ check("op arg", "op", "arg")
+ check("op arg // test", "op", "arg")
+ check("op base64 ABC//==", "op", "base64", "ABC//==")
+ check("op base64 base64", "op", "base64", "base64")
+ check("op base64 base64 //comment", "op", "base64", "base64")
+ check("op base64 base64; op2 //done", "op", "base64", "base64", ";", "op2")
+ check("op base64 ABC/==", "op", "base64", "ABC/==")
+ check("op base64 ABC/== /", "op", "base64", "ABC/==", "/")
+ check("op base64 ABC/== //", "op", "base64", "ABC/==")
+ check("op base64 ABC//== //", "op", "base64", "ABC//==")
+ check("op b64 ABC//== //", "op", "b64", "ABC//==")
+ check("op b64(ABC//==) // comment", "op", "b64(ABC//==)")
+ check("op base64(ABC//==) // comment", "op", "base64(ABC//==)")
+ check("op b64(ABC/==) // comment", "op", "b64(ABC/==)")
+ check("op base64(ABC/==) // comment", "op", "base64(ABC/==)")
+ check("base64(ABC//==)", "base64(ABC//==)")
+ check("b(ABC//==)", "b(ABC")
+ check("b(ABC//==) //", "b(ABC")
+ check("b(ABC ==) //", "b(ABC", "==)")
+ check("op base64 ABC)", "op", "base64", "ABC)")
+ check("op base64 ABC) // comment", "op", "base64", "ABC)")
+ check("op base64 ABC//) // comment", "op", "base64", "ABC//)")
+ check(`op "test"`, "op", `"test"`)
+ check(`op "test1 test2"`, "op", `"test1 test2"`)
+ check(`op "test1 test2" // comment`, "op", `"test1 test2"`)
+ check(`op "test1 test2 // not a comment"`, "op", `"test1 test2 // not a comment"`)
+ check(`op "test1 test2 // not a comment" // comment`, "op", `"test1 test2 // not a comment"`)
+ check(`op "test1 test2" //`, "op", `"test1 test2"`)
+ check(`op "test1 test2"//`, "op", `"test1 test2"`)
+ check(`op "test1 test2`, "op", `"test1 test2`) // non-terminated string literal
+ check(`op "test1 test2\"`, "op", `"test1 test2\"`) // non-terminated string literal
+ check(`op \"test1 test2\"`, "op", `\"test1`, `test2\"`) // not a string literal
+ check(`"test1 test2"`, `"test1 test2"`)
+ check(`\"test1 test2"`, `\"test1`, `test2"`)
+ check(`"" // test`, `""`)
+ check("int 1; int 2", "int", "1", ";", "int", "2")
+ check("int 1;;;int 2", "int", "1", ";", ";", ";", "int", "2")
+ check("int 1; ;int 2;; ; ;; ", "int", "1", ";", ";", "int", "2", ";", ";", ";", ";", ";")
+ check(";", ";")
+ check("; ; ;;;;", ";", ";", ";", ";", ";", ";")
+ check(" ;", ";")
+ check(" ; ", ";")
+}
+
+func TestSplitTokens(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ check := func(tokens []string, left []string, right []string) {
+ t.Helper()
+ current, next := splitTokens(tokens)
+ assert.Equal(t, left, current)
+ assert.Equal(t, right, next)
+ }
+
+ check([]string{"hey,", "how's", ";", ";", "it", "going", ";"},
+ []string{"hey,", "how's"},
+ []string{";", "it", "going", ";"},
+ )
+
+ check([]string{";"},
+ []string{},
+ []string{},
+ )
+
+ check([]string{";", "it", "going"},
+ []string{},
+ []string{"it", "going"},
+ )
+
+ check([]string{"hey,", "how's"},
+ []string{"hey,", "how's"},
+ nil,
+ )
+
+ check([]string{`"hey in quotes;"`, "getting", `";"`, ";", "tricky"},
+ []string{`"hey in quotes;"`, "getting", `";"`},
+ []string{"tricky"},
+ )
+
}
func TestAssembleRejectNegJump(t *testing.T) {
@@ -1798,22 +1830,22 @@ func TestAssembleAsset(t *testing.T) {
testProg(t, "asset_holding_get ABC 1", v,
Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; asset_holding_get ABC 1", v,
- Expect{2, "asset_holding_get ABC 1 expects 2 stack arguments..."})
+ Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
- Expect{3, "asset_holding_get expects 1 immediate argument"})
+ Expect{1, "asset_holding_get expects 1 immediate argument"})
testProg(t, "int 1; int 1; asset_holding_get ABC", v,
- Expect{3, "asset_holding_get unknown field: \"ABC\""})
+ Expect{1, "asset_holding_get unknown field: \"ABC\""})
testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
- Expect{2, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
+ Expect{1, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
// Test that AssetUnitName is known to return bytes
testProg(t, "int 1; asset_params_get AssetUnitName; pop; int 1; +", v,
- Expect{5, "+ arg 0 wanted type uint64..."})
+ Expect{1, "+ arg 0 wanted type uint64..."})
// Test that AssetTotal is known to return uint64
testProg(t, "int 1; asset_params_get AssetTotal; pop; byte 0x12; concat", v,
- Expect{5, "concat arg 0 wanted type []byte..."})
+ Expect{1, "concat arg 0 wanted type []byte..."})
testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects 1 immediate argument")
testLine(t, "asset_params_get ABC", v, "asset_params_get unknown field: \"ABC\"")
@@ -1955,8 +1987,7 @@ intc_0 // 1
bnz label1
label1:
`, v)
- ops, err := AssembleStringWithVersion(source, v)
- require.NoError(t, err)
+ ops := testProg(t, source, v)
dis, err := Disassemble(ops.Program)
require.NoError(t, err)
require.Equal(t, source, dis)
@@ -2069,8 +2100,7 @@ func TestHasStatefulOps(t *testing.T) {
t.Parallel()
source := "int 1"
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, source, AssemblerMaxVersion)
has, err := HasStatefulOps(ops.Program)
require.NoError(t, err)
require.False(t, has)
@@ -2080,8 +2110,7 @@ int 1
app_opted_in
err
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops = testProg(t, source, AssemblerMaxVersion)
has, err = HasStatefulOps(ops.Program)
require.NoError(t, err)
require.True(t, has)
@@ -2258,46 +2287,38 @@ func TestAssemblePragmaVersion(t *testing.T) {
text := `#pragma version 1
int 1
`
- ops, err := AssembleStringWithVersion(text, 1)
- require.NoError(t, err)
- ops1, err := AssembleStringWithVersion("int 1", 1)
- require.NoError(t, err)
+ ops := testProg(t, text, 1)
+ ops1 := testProg(t, "int 1", 1)
require.Equal(t, ops1.Program, ops.Program)
testProg(t, text, 0, Expect{1, "version mismatch..."})
testProg(t, text, 2, Expect{1, "version mismatch..."})
testProg(t, text, assemblerNoVersion)
- ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
- require.NoError(t, err)
+ ops = testProg(t, text, assemblerNoVersion)
require.Equal(t, ops1.Program, ops.Program)
text = `#pragma version 2
int 1
`
- ops, err = AssembleStringWithVersion(text, 2)
- require.NoError(t, err)
- ops2, err := AssembleStringWithVersion("int 1", 2)
- require.NoError(t, err)
+ ops = testProg(t, text, 2)
+ ops2 := testProg(t, "int 1", 2)
require.Equal(t, ops2.Program, ops.Program)
testProg(t, text, 0, Expect{1, "version mismatch..."})
testProg(t, text, 1, Expect{1, "version mismatch..."})
- ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
- require.NoError(t, err)
+ ops = testProg(t, text, assemblerNoVersion)
require.Equal(t, ops2.Program, ops.Program)
// check if no version it defaults to v1
text = `byte "test"
len
`
- ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
- require.NoError(t, err)
- ops1, err = AssembleStringWithVersion(text, 1)
+ ops = testProg(t, text, assemblerNoVersion)
+ ops1 = testProg(t, text, 1)
require.Equal(t, ops1.Program, ops.Program)
- require.NoError(t, err)
- ops2, err = AssembleString(text)
+ ops2, err := AssembleString(text)
require.NoError(t, err)
require.Equal(t, ops2.Program, ops.Program)
@@ -2325,9 +2346,8 @@ func TestErrShortBytecblock(t *testing.T) {
t.Parallel()
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
- ops, err := AssembleStringWithVersion(text, 1)
- require.NoError(t, err)
- _, _, err = parseIntcblock(ops.Program, 1)
+ ops := testProg(t, text, 1)
+ _, _, err := parseIntcblock(ops.Program, 1)
require.Equal(t, err, errShortIntcblock)
var cx EvalContext
@@ -2369,8 +2389,7 @@ func TestMethodWarning(t *testing.T) {
for _, test := range tests {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
src := fmt.Sprintf("method \"%s\"\nint 1", test.method)
- ops, err := AssembleStringWithVersion(src, v)
- require.NoError(t, err)
+ ops := testProg(t, src, v)
if test.pass {
require.Len(t, ops.Warnings, 0)
@@ -2422,69 +2441,92 @@ func TestSwapTypeCheck(t *testing.T) {
t.Parallel()
/* reconfirm that we detect this type error */
- testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, Expect{3, "+ arg 1..."})
+ testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
/* despite swap, we track types */
- testProg(t, "int 1; byte 0x1234; swap; +", AssemblerMaxVersion, Expect{4, "+ arg 0..."})
- testProg(t, "byte 0x1234; int 1; swap; +", AssemblerMaxVersion, Expect{4, "+ arg 1..."})
+ testProg(t, "int 1; byte 0x1234; swap; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "byte 0x1234; int 1; swap; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
}
func TestDigAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; dig; +", AssemblerMaxVersion, Expect{2, "dig expects 1 immediate..."})
- testProg(t, "int 1; dig junk; +", AssemblerMaxVersion, Expect{2, "dig unable to parse..."})
+ testProg(t, "int 1; dig; +", AssemblerMaxVersion, Expect{1, "dig expects 1 immediate..."})
+ testProg(t, "int 1; dig junk; +", AssemblerMaxVersion, Expect{1, "dig unable to parse..."})
testProg(t, "int 1; byte 0x1234; int 2; dig 2; +", AssemblerMaxVersion)
testProg(t, "byte 0x32; byte 0x1234; int 2; dig 2; +", AssemblerMaxVersion,
- Expect{5, "+ arg 1..."})
+ Expect{1, "+ arg 1..."})
testProg(t, "byte 0x32; byte 0x1234; int 2; dig 3; +", AssemblerMaxVersion,
- Expect{4, "dig 3 expects 4..."})
+ Expect{1, "dig 3 expects 4..."})
testProg(t, "int 1; byte 0x1234; int 2; dig 12; +", AssemblerMaxVersion,
- Expect{4, "dig 12 expects 13..."})
+ Expect{1, "dig 12 expects 13..."})
// Confirm that digging something out does not ruin our knowledge about the types in the middle
testProg(t, "int 1; byte 0x1234; byte 0x1234; dig 2; dig 3; +; pop; +", AssemblerMaxVersion,
- Expect{8, "+ arg 1..."})
+ Expect{1, "+ arg 1..."})
testProg(t, "int 3; pushbytes \"123456\"; int 1; dig 2; substring3", AssemblerMaxVersion)
}
+func TestBuryAsm(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ testProg(t, "int 1; bury; +", AssemblerMaxVersion, Expect{1, "bury expects 1 immediate..."})
+ testProg(t, "int 1; bury junk; +", AssemblerMaxVersion, Expect{1, "bury unable to parse..."})
+
+ testProg(t, "int 1; byte 0x1234; int 2; bury 1; +", AssemblerMaxVersion) // the 2 replaces the byte string
+ testProg(t, "int 2; int 2; byte 0x1234; bury 1; +", AssemblerMaxVersion,
+ Expect{1, "+ arg 1..."})
+ testProg(t, "byte 0x32; byte 0x1234; int 2; bury 3; +", AssemblerMaxVersion,
+ Expect{1, "bury 3 expects 4..."})
+ testProg(t, "int 1; byte 0x1234; int 2; bury 12; +", AssemblerMaxVersion,
+ Expect{1, "bury 12 expects 13..."})
+
+ // We do not lose track of the ints between ToS and bury index
+ testProg(t, "int 0; int 1; int 2; int 4; bury 3; concat", AssemblerMaxVersion,
+ Expect{1, "concat arg 1 wanted type []byte..."})
+
+ // Even when we are burying into unknown (seems repetitive, but is an easy bug)
+ testProg(t, "int 0; int 0; b LABEL; LABEL: int 1; int 2; int 4; bury 4; concat", AssemblerMaxVersion,
+ Expect{1, "concat arg 1 wanted type []byte..."})
+}
+
func TestEqualsTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, Expect{3, "== arg 0..."})
- testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, Expect{3, "!= arg 0..."})
- testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, Expect{3, "== arg 0..."})
- testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, Expect{3, "!= arg 0..."})
+ testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, Expect{1, "== arg 0..."})
+ testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, Expect{1, "!= arg 0..."})
+ testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, Expect{1, "== arg 0..."})
+ testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, Expect{1, "!= arg 0..."})
}
func TestDupTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, Expect{4, "+ arg 0..."})
+ testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
testProg(t, "byte 0x1234; int 1; dup; +", AssemblerMaxVersion)
- testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, Expect{4, "+ arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, Expect{4, "+ arg 1..."})
+ testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
- testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
- testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
}
func TestSelectTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
}
func TestSetBitTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
}
func TestScratchTypeCheck(t *testing.T) {
@@ -2493,13 +2535,13 @@ func TestScratchTypeCheck(t *testing.T) {
// All scratch slots should start as uint64
testProg(t, "load 0; int 1; +", AssemblerMaxVersion)
// Check load and store accurately using the scratch space
- testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{5, "+ arg 0..."})
+ testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
// Loads should know the type it's loading if all the slots are the same type
- testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{3, "btoi arg 0..."})
+ testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{1, "btoi arg 0..."})
// Loads doesn't know the type when slot types vary
testProg(t, "byte 0x01; store 0; int 1; loads; btoi", AssemblerMaxVersion)
// Stores should only set slots to StackAny if they are not the same type as what is being stored
- testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{8, "+ arg 0..."})
+ testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
// ScratchSpace should reset after hitting label in deadcode
testProg(t, "byte 0x01; store 0; b label1; label1:; load 0; int 1; +", AssemblerMaxVersion)
// But it should reset to StackAny not uint64
@@ -2507,7 +2549,32 @@ func TestScratchTypeCheck(t *testing.T) {
// Callsubs should also reset the scratch space
testProg(t, "callsub A; load 0; btoi; return; A: byte 0x01; store 0; retsub", AssemblerMaxVersion)
// But the scratchspace should still be tracked after the callsub
- testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{5, "btoi arg 0..."})
+ testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{1, "btoi arg 0..."})
+}
+
+// TestProtoAsm confirms that the assembler will yell at you if you are
+// clearly dipping into the arguments when using `proto`. You should be using
+// `frame_dig`.
+func TestProtoAsm(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ testProg(t, "proto 0 0", AssemblerMaxVersion, Expect{1, "proto must be unreachable..."})
+ testProg(t, notrack("proto 0 0"), AssemblerMaxVersion)
+ testProg(t, "b a; int 1; a: proto 0 0", AssemblerMaxVersion) // we could flag a `b` to `proto`
+
+ testProg(t, `
+ int 10
+ int 20
+ callsub main
+ int 1
+ return
+main:
+ proto 2 1
+ + // This consumes the top arg. We complain.
+ dup; dup // Even though the dup;dup restores it, so it _evals_ fine.
+ retsub
+`, AssemblerMaxVersion)
+
}
func TestCoverAsm(t *testing.T) {
@@ -2515,9 +2582,10 @@ func TestCoverAsm(t *testing.T) {
t.Parallel()
testProg(t, `int 4; byte "john"; int 5; cover 2; pop; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "ayush"; int 5; cover 1; pop; +`, AssemblerMaxVersion)
- testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{5, "+ arg 1..."})
+ testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{1, "+ arg 1..."})
- testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{2, "cover unable to parse n ..."})
+ testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{1, "cover unable to parse n ..."})
+ testProg(t, notrack(`int 4; int 5; cover 0`), AssemblerMaxVersion)
}
func TestUncoverAsm(t *testing.T) {
@@ -2526,38 +2594,38 @@ func TestUncoverAsm(t *testing.T) {
testProg(t, `int 4; byte "john"; int 5; uncover 2; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "ayush"; int 5; uncover 1; pop; +`, AssemblerMaxVersion)
testProg(t, `int 1; byte "jj"; byte "ayush"; byte "john"; int 5; uncover 4; +`, AssemblerMaxVersion)
- testProg(t, `int 4; byte "ayush"; int 5; uncover 1; +`, AssemblerMaxVersion, Expect{5, "+ arg 1..."})
+ testProg(t, `int 4; byte "ayush"; int 5; uncover 1; +`, AssemblerMaxVersion, Expect{1, "+ arg 1..."})
}
func TestTxTypes(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{2, "itxn_field Sender expects 1 stack argument..."})
- testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{3, "...wanted type []byte got uint64"})
+ testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{1, "itxn_field Sender expects 1 stack argument..."})
+ testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{1, "...wanted type []byte got uint64"})
testProg(t, "itxn_begin; byte 0x56127823; itxn_field Sender", 5)
- testProg(t, "itxn_begin; itxn_field Amount", 5, Expect{2, "itxn_field Amount expects 1 stack argument..."})
- testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{3, "...wanted type uint64 got []byte"})
+ testProg(t, "itxn_begin; itxn_field Amount", 5, Expect{1, "itxn_field Amount expects 1 stack argument..."})
+ testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{1, "...wanted type uint64 got []byte"})
testProg(t, "itxn_begin; int 1; itxn_field Amount", 5)
}
func TestBadInnerFields(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{4, "...is not allowed."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{3, "...Note field was introduced in v6..."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{3, "...VotePK field was introduced in v6..."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{4, "...is not allowed."})
-
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{1, "...Note field was introduced in v6..."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{1, "...VotePK field was introduced in v6..."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{1, "...is not allowed."})
+
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{1, "...is not allowed."})
testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 6)
testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 6)
- testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{1, "...is not allowed."})
}
func TestTypeTracking(t *testing.T) {
@@ -2573,7 +2641,7 @@ func TestTypeTracking(t *testing.T) {
// but we do want to ensure we're not just treating the code after callsub as dead
testProg(t, "callsub A; int 1; concat; return; A: int 1; int 2; retsub", LogicVersion,
- Expect{3, "concat arg 1 wanted..."})
+ Expect{1, "concat arg 1 wanted..."})
// retsub deadens code, like any unconditional branch
testProg(t, "callsub A; +; return; A: int 1; int 2; retsub; concat", LogicVersion)
@@ -2673,7 +2741,7 @@ func TestMergeProtos(t *testing.T) {
func TestGetSpec(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ops, _ := AssembleStringWithVersion("int 1", AssemblerMaxVersion)
+ ops := testProg(t, "int 1", AssemblerMaxVersion)
ops.versionedPseudoOps["dummyPseudo"] = make(map[int]OpSpec)
ops.versionedPseudoOps["dummyPseudo"][1] = OpSpec{Name: "b:", Version: AssemblerMaxVersion, Proto: proto("b:")}
ops.versionedPseudoOps["dummyPseudo"][2] = OpSpec{Name: ":", Version: AssemblerMaxVersion}
@@ -2697,7 +2765,7 @@ func TestAddPseudoDocTags(t *testing.T) {
delete(opDocByName, "any")
}()
- pseudoOps["tests"] = map[int]OpSpec{2: OpSpec{Name: "multiple"}, 1: OpSpec{Name: "single"}, 0: OpSpec{Name: "none"}, anyImmediates: OpSpec{Name: "any"}}
+ pseudoOps["tests"] = map[int]OpSpec{2: {Name: "multiple"}, 1: {Name: "single"}, 0: {Name: "none"}, anyImmediates: {Name: "any"}}
addPseudoDocTags()
require.Equal(t, "`multiple` can be called using `tests` with 2 immediates.", opDocByName["multiple"])
require.Equal(t, "`single` can be called using `tests` with 1 immediate.", opDocByName["single"])
@@ -2711,7 +2779,114 @@ func TestReplacePseudo(t *testing.T) {
for v := uint64(replaceVersion); v <= AssemblerMaxVersion; v++ {
testProg(t, "byte 0x0000; byte 0x1234; replace 0", v)
testProg(t, "byte 0x0000; int 0; byte 0x1234; replace", v)
- testProg(t, "byte 0x0000; byte 0x1234; replace", v, Expect{3, "replace without immediates expects 3 stack arguments but stack height is 2"})
- testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, Expect{4, "replace 0 arg 0 wanted type []byte got uint64"})
+ testProg(t, "byte 0x0000; byte 0x1234; replace", v, Expect{1, "replace without immediates expects 3 stack arguments but stack height is 2"})
+ testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, Expect{1, "replace 0 arg 0 wanted type []byte got uint64"})
+ }
+}
+
+func checkSame(t *testing.T, version uint64, first string, compares ...string) {
+ t.Helper()
+ if version == 0 {
+ version = assemblerNoVersion
+ }
+ ops := testProg(t, first, version)
+ for _, compare := range compares {
+ other := testProg(t, compare, version)
+		if !bytes.Equal(other.Program, ops.Program) {
+ t.Log(Disassemble(ops.Program))
+ t.Log(Disassemble(other.Program))
+ }
+ assert.Equal(t, ops.Program, other.Program, "%s unlike %s", first, compare)
+ }
+}
+
+func TestSemiColon(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ checkSame(t, AssemblerMaxVersion,
+ "pushint 0 ; pushint 1 ; +; int 3 ; *",
+ "pushint 0\npushint 1\n+\nint 3\n*",
+ "pushint 0; pushint 1; +; int 3; *; // comment; int 2",
+ "pushint 0; ; ; pushint 1 ; +; int 3 ; *//check",
+ )
+
+ checkSame(t, 0,
+ "#pragma version 7\nint 1",
+ "// junk;\n#pragma version 7\nint 1",
+ "// junk;\n #pragma version 7\nint 1",
+ )
+
+ checkSame(t, AssemblerMaxVersion,
+ `byte "test;this"; pop;`,
+ `byte "test;this"; ; pop;`,
+ `byte "test;this";;;pop;`,
+ )
+}
+
+func TestAssembleSwitch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+	// fail when target doesn't correspond to an existing label
+ source := `
+ pushint 1
+ switch label1 label2
+ label1:
+ `
+	testProg(t, source, AssemblerMaxVersion, Expect{3, "reference to undefined label \"label2\""})
+
+ // fail when target index != uint64
+ testProg(t, `
+ byte "fail"
+ switch label1
+ labe11:
+ `, AssemblerMaxVersion, Expect{3, "switch label1 arg 0 wanted type uint64..."})
+
+ // No labels is pretty degenerate, but ok, I suppose. It's just a no-op
+ testProg(t, `
+int 0
+switch
+int 1
+`, AssemblerMaxVersion)
+
+ // confirm arg limit
+ source = `
+ pushint 1
+ switch label1 label2
+ label1:
+ label2:
+ `
+ ops := testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 9) // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*2)
+
+ var labels []string
+ for i := 0; i < 255; i++ {
+ labels = append(labels, fmt.Sprintf("label%d", i))
}
+
+ // test that 255 labels is ok
+ source = fmt.Sprintf(`
+ pushint 1
+ switch %s
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 515) // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*255)
+
+ // 256 is too many
+ source = fmt.Sprintf(`
+ pushint 1
+ switch %s extra
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ ops = testProg(t, source, AssemblerMaxVersion, Expect{3, "switch cannot take more than 255 labels"})
+
+ // allow duplicate label reference
+ source = `
+ pushint 1
+ switch label1 label1
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion)
}
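The length assertions in this test encode the switch layout: one version byte, two bytes for `pushint 1`, then the `switch` opcode, a label-count byte, and one big-endian int16 offset per label. A standalone sketch of that arithmetic (the helper name is illustrative, not from the diff):

    package main

    import "fmt"

    // switchProgLen mirrors the comments above:
    // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*n).
    func switchProgLen(labels int) int {
    	return 1 + 2 + 1 + 1 + 2*labels
    }

    func main() {
    	fmt.Println(switchProgLen(2))   // 9, matching the two-label assertion
    	fmt.Println(switchProgLen(255)) // 515, the 255-label maximum
    }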
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 086741dcd..13a19ddaa 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -467,15 +467,15 @@ func TestBackwardCompatAssemble(t *testing.T) {
source := "int 1; int 1; bnz done; done:"
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, assemblerNoVersion, Expect{4, "label \"done\" is too far away"})
+ testProg(t, source, assemblerNoVersion, Expect{1, "label \"done\" is too far away"})
})
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, 0, Expect{4, "label \"done\" is too far away"})
+ testProg(t, source, 0, Expect{1, "label \"done\" is too far away"})
})
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, 1, Expect{4, "label \"done\" is too far away"})
+ testProg(t, source, 1, Expect{1, "label \"done\" is too far away"})
})
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go
index cae8f7111..a2a0453c4 100644
--- a/data/transactions/logic/debugger.go
+++ b/data/transactions/logic/debugger.go
@@ -202,12 +202,12 @@ func valueDeltaToValueDelta(vd *basics.ValueDelta) basics.ValueDelta {
// parseCallStack initializes an array of CallFrame objects from the raw
// callstack.
-func (d *DebugState) parseCallstack(callstack []int) []CallFrame {
+func (d *DebugState) parseCallstack(callstack []frame) []CallFrame {
callFrames := make([]CallFrame, 0)
lines := strings.Split(d.Disassembly, "\n")
- for _, pc := range callstack {
+ for _, fr := range callstack {
// The callsub is pc - 3 from the callstack pc
- callsubLineNum := d.PCToLine(pc - 3)
+ callsubLineNum := d.PCToLine(fr.retpc - 3)
callSubLine := strings.Fields(lines[callsubLineNum])
label := ""
if callSubLine[0] == "callsub" {
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index 060b953fc..f33e8ae5c 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -202,7 +202,7 @@ func TestParseCallstack(t *testing.T) {
Disassembly: testCallStackProgram,
PCOffset: []PCOffset{{PC: 1, Offset: 18}, {PC: 4, Offset: 30}, {PC: 7, Offset: 45}, {PC: 8, Offset: 65}, {PC: 11, Offset: 88}},
}
- callstack := []int{4, 8}
+ callstack := []frame{{retpc: 4}, {retpc: 8}}
cfs := dState.parseCallstack(callstack)
require.Equal(t, expectedCallFrames, cfs)
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 1a43995c2..a12149bcf 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -126,7 +126,9 @@ var opDocByName = map[string]string{
"pop": "discard A",
"dup": "duplicate A",
"dup2": "duplicate A and B",
+ "dupn": "duplicate A, N times",
"dig": "Nth value from the top of the stack. dig 0 is equivalent to dup",
+ "bury": "Replace the Nth value from the top of the stack. bury 0 fails.",
"cover": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N.",
"uncover": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N.",
"swap": "swaps A and B on stack",
@@ -192,7 +194,14 @@ var opDocByName = map[string]string{
"itxn_submit": "execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.",
"vrf_verify": "Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.",
- "block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)",
+ "block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
+
+ "switch": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
+
+ "proto": "Prepare top call frame for a retsub that will assume A args and R return values.",
+ "frame_dig": "Nth (signed) value from the frame pointer.",
+ "frame_bury": "Replace the Nth (signed) value from the frame pointer in the stack",
+ "popn": "Remove N values from the top of the stack",
}
// OpDoc returns a description of the op
@@ -236,6 +245,7 @@ var opcodeImmediateNotes = map[string]string{
"extract": "{uint8 start position} {uint8 length}",
"replace2": "{uint8 start position}",
"dig": "{uint8 depth}",
+ "bury": "{uint8 depth}",
"cover": "{uint8 depth}",
"uncover": "{uint8 depth}",
@@ -257,10 +267,18 @@ var opcodeImmediateNotes = map[string]string{
"ecdsa_pk_recover": "{uint8 curve index}",
"base64_decode": "{uint8 encoding index}",
- "json_ref": "{string return type}",
+ "json_ref": "{uint8 return type}",
"vrf_verify": "{uint8 parameters index}",
"block": "{uint8 block field}",
+
+ "switch": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+
+ "proto": "{uint8 arguments} {uint8 return values}",
+ "frame_dig": "{int8 frame slot}",
+ "frame_bury": "{int8 frame slot}",
+ "popn": "{uint8 stack depth}",
+ "dupn": "{uint8 copy count}",
}
// OpImmediateNote returns a short string about immediate data which follows the op byte
@@ -281,8 +299,8 @@ var opDocExtras = map[string]string{
"bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)",
"bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
"b": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
- "callsub": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
- "retsub": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "callsub": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.",
+ "retsub": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.",
"intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
"bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
"*": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.",
@@ -323,6 +341,7 @@ var opDocExtras = map[string]string{
"itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
"base64_decode": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
"json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
+ "proto": "Fails unless the last instruction executed was a `callsub`.",
}
// OpDocExtra returns extra documentation text about an op
@@ -339,7 +358,7 @@ var OpGroups = map[string][]string{
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
- "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub"},
+ "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log", "block"},
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index f270d9162..e95293106 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -105,14 +105,29 @@ func TestAllImmediatesDocumented(t *testing.T) {
partitiontest.PartitionTest(t)
for _, op := range OpSpecs {
- count := len(op.OpDetails.Immediates)
+ count := len(op.Immediates)
note := OpImmediateNote(op.Name)
- if count == 1 && op.OpDetails.Immediates[0].kind >= immBytes {
+ if count == 1 && op.Immediates[0].kind >= immBytes {
// More elaborate than can be checked by easy count.
assert.NotEmpty(t, note)
continue
}
assert.Equal(t, count, strings.Count(note, "{"), "opcodeImmediateNotes for %s is wrong", op.Name)
+ assert.Equal(t, count, strings.Count(note, "}"), "opcodeImmediateNotes for %s is wrong", op.Name)
+ for _, imm := range op.Immediates {
+ switch imm.kind {
+ case immByte:
+ require.True(t, strings.HasPrefix(note, "{uint8 "), "%v %v", op.Name, note)
+ case immInt8:
+ require.True(t, strings.HasPrefix(note, "{int8 "), "%v %v", op.Name, note)
+ case immLabel:
+ require.True(t, strings.HasPrefix(note, "{int16 "), "%v %v", op.Name, note)
+ case immInt:
+ require.True(t, strings.HasPrefix(note, "{varuint "), "%v %v", op.Name, note)
+ }
+ close := strings.Index(note, "}")
+ note = strings.TrimPrefix(note[close+1:], " ")
+ }
}
}
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index b648ff778..1e78d6960 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -472,6 +472,15 @@ func (ep *EvalParams) RecordAD(gi int, ad transactions.ApplyData) {
}
}
+type frame struct {
+ retpc int
+ height int
+
+ clear bool // perform "shift and clear" in retsub
+ args int
+ returns int
+}
+
type scratchSpace [256]stackValue
// EvalContext is the execution context of AVM bytecode. It contains the full
@@ -497,8 +506,9 @@ type EvalContext struct {
// keeping the running changes, the debugger can be changed to display them
// as the app runs.
- stack []stackValue
- callstack []int
+ stack []stackValue
+ callstack []frame
+ fromCallsub bool
appID basics.AppIndex
program []byte
@@ -980,7 +990,7 @@ func (cx *EvalContext) step() error {
preheight := len(cx.stack)
err := spec.op(cx)
- if err == nil {
+ if err == nil && !spec.trusted {
postheight := len(cx.stack)
if postheight-preheight != len(spec.Return.Types)-len(spec.Arg.Types) && !spec.AlwaysExits() {
return fmt.Errorf("%s changed stack height improperly %d != %d",
@@ -1341,17 +1351,17 @@ func opLt(cx *EvalContext) error {
// opSwap, opLt, and opNot always succeed (return nil). So error checking elided in Gt,Le,Ge
func opGt(cx *EvalContext) error {
- opSwap(cx)
+ opSwap(cx) //nolint:errcheck // opSwap always succeeds
return opLt(cx)
}
func opLe(cx *EvalContext) error {
- opGt(cx)
+ opGt(cx) //nolint:errcheck // opGt always succeeds
return opNot(cx)
}
func opGe(cx *EvalContext) error {
- opLt(cx)
+ opLt(cx) //nolint:errcheck // opLt always succeeds
return opNot(cx)
}
@@ -1965,12 +1975,17 @@ func opArgs(cx *EvalContext) error {
return opArgN(cx, n)
}
+func decodeBranchOffset(program []byte, pos int) int {
+ // tricky casting to preserve signed value
+ return int(int16(program[pos])<<8 | int16(program[pos+1]))
+}
+
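
An illustrative sketch (not part of the diff) of the "tricky casting" above: routing each byte through int16 before shifting makes the big-endian offset come out signed, where a uint16 view would not:

    package main

    import "fmt"

    func main() {
        program := []byte{0xff, 0xfe} // big-endian two's-complement -2
        // int16(0xff)<<8 wraps to -256; or-ing in the low byte yields -2.
        fmt.Println(int(int16(program[0])<<8 | int16(program[1]))) // -2
        fmt.Println(uint16(program[0])<<8 | uint16(program[1]))    // 65534
    }
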
func branchTarget(cx *EvalContext) (int, error) {
- offset := int16(uint16(cx.program[cx.pc+1])<<8 | uint16(cx.program[cx.pc+2]))
+ offset := decodeBranchOffset(cx.program, cx.pc+1)
if offset < 0 && cx.version < backBranchEnabledVersion {
return 0, fmt.Errorf("negative branch offset %x", offset)
}
- target := cx.pc + 3 + int(offset)
+ target := cx.pc + 3 + offset
var branchTooFar bool
if cx.version >= 2 {
// branching to exactly the end of the program (target == len(cx.program)), the next pc after the last instruction, is okay and ends normally
@@ -1985,6 +2000,32 @@ func branchTarget(cx *EvalContext) (int, error) {
return target, nil
}
+func switchTarget(cx *EvalContext, branchIdx uint64) (int, error) {
+ numOffsets := int(cx.program[cx.pc+1])
+
+ end := cx.pc + 2 // end of opcode + number of offsets, beginning of offset list
+ eoi := end + 2*numOffsets // end of instruction
+
+ if eoi > len(cx.program) { // eoi will equal len(p) if switch is last instruction
+ return 0, fmt.Errorf("switch claims to extend beyond program")
+ }
+
+ offset := 0
+ if branchIdx < uint64(numOffsets) {
+ pos := end + int(2*branchIdx) // position of referenced offset: each offset is 2 bytes
+ offset = decodeBranchOffset(cx.program, pos)
+ }
+
+ target := eoi + offset
+
+ // branching to exactly the end of the program (target == len(cx.program)), the next pc after the last instruction,
+ // is okay and ends normally
+ if target > len(cx.program) || target < 0 {
+ return 0, fmt.Errorf("branch target %d outside of program", target)
+ }
+ return target, nil
+}
+
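
An illustrative sketch (not part of the diff) of the layout switchTarget walks: a count byte after the opcode, then that many big-endian int16 offsets, each relative to the end of the whole instruction. The bytes are a toy encoding (0x8d is the switch opcode per the langspec.json change below); only the arithmetic matters:

    package main

    import "fmt"

    func main() {
        program := []byte{0x8d, 2, 0x00, 0x03, 0xff, 0xfd, 0, 0, 0, 0}
        pc := 0
        numOffsets := int(program[pc+1]) // 2 labels
        end := pc + 2                    // offset list starts here
        eoi := end + 2*numOffsets        // end of instruction: 6
        for idx := 0; idx < numOffsets; idx++ {
            pos := end + 2*idx
            offset := int(int16(program[pos])<<8 | int16(program[pos+1]))
            fmt.Println(idx, eoi+offset) // 0 9, then 1 3
        }
        // An index >= numOffsets uses offset 0 and falls through to eoi.
    }
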
// checks any branch that is {op} {int16 be offset}
func checkBranch(cx *EvalContext) error {
target, err := branchTarget(cx)
@@ -2000,6 +2041,32 @@ func checkBranch(cx *EvalContext) error {
cx.branchTargets[target] = true
return nil
}
+
+// checks switch is encoded properly (and calculates nextpc)
+func checkSwitch(cx *EvalContext) error {
+ numOffsets := int(cx.program[cx.pc+1])
+ eoi := cx.pc + 2 + 2*numOffsets
+
+ for branchIdx := 0; branchIdx < numOffsets; branchIdx++ {
+ target, err := switchTarget(cx, uint64(branchIdx))
+ if err != nil {
+ return err
+ }
+
+ if target < eoi {
+ // If a branch goes backwards, we should have already noted that an instruction began at that location.
+ if _, ok := cx.instructionStarts[target]; !ok {
+ return fmt.Errorf("back branch target %d is not an aligned instruction", target)
+ }
+ }
+ cx.branchTargets[target] = true
+ }
+
+ // this opcode's size is dynamic so nextpc must be set here
+ cx.nextpc = eoi
+ return nil
+}
+
func opBnz(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.nextpc = cx.pc + 3
@@ -2039,9 +2106,39 @@ func opB(cx *EvalContext) error {
return nil
}
+func opSwitch(cx *EvalContext) error {
+ last := len(cx.stack) - 1
+ branchIdx := cx.stack[last].Uint
+
+ cx.stack = cx.stack[:last]
+ target, err := switchTarget(cx, branchIdx)
+ if err != nil {
+ return err
+ }
+ cx.nextpc = target
+ return nil
+}
+
+const protoByte = 0x8a
+
func opCallSub(cx *EvalContext) error {
- cx.callstack = append(cx.callstack, cx.pc+3)
- return opB(cx)
+ cx.callstack = append(cx.callstack, frame{
+ retpc: cx.pc + 3, // retpc is pc _after_ the callsub
+ height: len(cx.stack),
+ })
+ err := opB(cx)
+
+ /* We only set fromCallsub if we know we're jumping to a proto. In opProto,
+ we confirm we came directly from callsub by checking (and resetting) the
+ flag. This is really a little handshake between callsub and proto. Done
+ this way, we don't have to waste time clearing the fromCallsub flag in
+ every instruction, only in proto since we know we're going there next.
+ */
+
+ if cx.nextpc < len(cx.program) && cx.program[cx.nextpc] == protoByte {
+ cx.fromCallsub = true
+ }
+ return err
}
func opRetSub(cx *EvalContext) error {
@@ -2049,9 +2146,26 @@ func opRetSub(cx *EvalContext) error {
if top < 0 {
return errors.New("retsub with empty callstack")
}
- target := cx.callstack[top]
+ frame := cx.callstack[top]
+ if frame.clear { // A `proto` was issued in the subroutine, so retsub cleans up.
+ expect := frame.height + frame.returns
+ if len(cx.stack) < expect { // Check general error case first, only differentiate when error is assured
+ switch {
+ case len(cx.stack) < frame.height:
+ return fmt.Errorf("retsub executed with stack below frame. Did you pop args?")
+ case len(cx.stack) == frame.height:
+ return fmt.Errorf("retsub executed with no return values on stack. proto declared %d", frame.returns)
+ default:
+ return fmt.Errorf("retsub executed with %d return values on stack. proto declared %d",
+ len(cx.stack)-frame.height, frame.returns)
+ }
+ }
+ argstart := frame.height - frame.args
+ copy(cx.stack[argstart:], cx.stack[frame.height:expect])
+ cx.stack = cx.stack[:argstart+frame.returns]
+ }
cx.callstack = cx.callstack[:top]
- cx.nextpc = target
+ cx.nextpc = frame.retpc
return nil
}
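
An illustrative sketch (not part of the diff) isolating the "shift and clear" retsub performs when the frame was prepared by proto: return values slide down over the args, and anything above them is popped. Toy ints stand in for stackValue:

    package main

    import "fmt"

    func main() {
        // frame.height was 5 at callsub: the two args sit at indexes 3-4,
        // and the subroutine left one return value (99) above the frame.
        stack := []int{10, 20, 30, 41, 42, 99}
        height, args, returns := 5, 2, 1
        argstart := height - args                            // 3
        copy(stack[argstart:], stack[height:height+returns]) // 99 slides over the args
        stack = stack[:argstart+returns]
        fmt.Println(stack) // [10 20 30 99]
    }
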
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index b2c6bec0e..773330fab 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -109,6 +109,9 @@ byte 0x%s
}
func TestVrfVerify(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
ep, _, _ := makeSampleEnv()
testApp(t, notrack("int 1; int 2; int 3; vrf_verify VrfAlgorand"), ep, "arg 0 wanted")
testApp(t, notrack("byte 0x1122; int 2; int 3; vrf_verify VrfAlgorand"), ep, "arg 1 wanted")
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 3e051fc6b..f5c87abb3 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -314,7 +314,7 @@ func TestBalance(t *testing.T) {
text = `txn Accounts 1; balance; int 177; ==;`
// won't assemble in old version teal
- testProg(t, text, directRefEnabledVersion-1, Expect{2, "balance arg 0 wanted type uint64..."})
+ testProg(t, text, directRefEnabledVersion-1, Expect{1, "balance arg 0 wanted type uint64..."})
// but legal after that
testApp(t, text, ep)
@@ -475,7 +475,7 @@ func TestMinBalance(t *testing.T) {
testApp(t, "int 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
testProg(t, "txn Accounts 1; min_balance; int 1001; ==", directRefEnabledVersion-1,
- Expect{2, "min_balance arg 0 wanted type uint64..."})
+ Expect{1, "min_balance arg 0 wanted type uint64..."})
testProg(t, "txn Accounts 1; min_balance; int 1001; ==", directRefEnabledVersion)
testApp(t, "txn Accounts 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
// Receiver opts in
@@ -528,7 +528,7 @@ func TestAppCheckOptedIn(t *testing.T) {
testApp(t, "int 1; int 2; app_opted_in; int 0; ==", pre) // in pre, int 2 is an actual app id
testApp(t, "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"; int 2; app_opted_in; int 1; ==", now)
testProg(t, "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"; int 2; app_opted_in; int 1; ==", directRefEnabledVersion-1,
- Expect{3, "app_opted_in arg 0 wanted type uint64..."})
+ Expect{1, "app_opted_in arg 0 wanted type uint64..."})
// Receiver opts into 888, the current app in testApp
ledger.NewLocals(txn.Txn.Receiver, 888)
@@ -939,7 +939,7 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
// it wasn't legal to use a direct ref for account
testProg(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"; int 54; asset_holding_get AssetBalance`,
- directRefEnabledVersion-1, Expect{3, "asset_holding_get AssetBalance arg 0 wanted type uint64..."})
+ directRefEnabledVersion-1, Expect{1, "asset_holding_get AssetBalance arg 0 wanted type uint64..."})
// but it is now (empty asset yields 0,0 on stack)
testApp(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"; int 55; asset_holding_get AssetBalance; ==`, now)
// This is receiver, who is in Assets array
@@ -2374,6 +2374,9 @@ func TestReturnTypes(t *testing.T) {
"json_ref": `: byte "{\"k\": 7}"; byte "k"; json_ref JSONUint64`,
"block": "block BlkSeed",
+
+ "proto": "callsub p; p: proto 0 3",
+ "bury": ": int 1; int 2; int 3; bury 2; pop; pop;",
}
/* Make sure the specialCmd tests the opcode in question */
@@ -2399,6 +2402,9 @@ func TestReturnTypes(t *testing.T) {
"bn256_add": true,
"bn256_scalar_mul": true,
"bn256_pairing": true,
+
+ "frame_dig": true, // would need a "proto" subroutine
+ "frame_bury": true, // would need a "proto" subroutine
}
byName := OpsByName[LogicVersion]
@@ -2408,7 +2414,7 @@ func TestReturnTypes(t *testing.T) {
if (m & spec.Modes) == 0 {
continue
}
- if skipCmd[name] {
+ if skipCmd[name] || spec.trusted {
continue
}
t.Run(fmt.Sprintf("mode=%s,opcode=%s", m, name), func(t *testing.T) {
@@ -2426,6 +2432,8 @@ func TestReturnTypes(t *testing.T) {
switch imm.kind {
case immByte:
cmd += " 0"
+ case immInt8:
+ cmd += " -2"
case immInt:
cmd += " 10"
case immInts:
@@ -2436,6 +2444,8 @@ func TestReturnTypes(t *testing.T) {
cmd += " 0x12 0x34 0x56"
case immLabel:
cmd += " done; done: ;"
+ case immLabels:
+ cmd += " done1 done2; done1: ; done2: ;"
default:
require.Fail(t, "bad immediate", "%s", imm)
}
@@ -2536,6 +2546,9 @@ func TestLatestTimestamp(t *testing.T) {
}
func TestBlockSeed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
ep, txn, l := makeSampleEnv()
// makeSampleEnv creates txns with fv, lv that don't actually fit the round
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index ba7df73e9..130167bf0 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -340,12 +340,12 @@ func TestSimpleMath(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testAccepts(t, "int 2; int 3; + ;int 5;==", 1)
- testAccepts(t, "int 22; int 3; - ;int 19;==", 1)
- testAccepts(t, "int 8; int 7; * ;int 56;==", 1)
- testAccepts(t, "int 21; int 7; / ;int 3;==", 1)
+ testAccepts(t, "int 2; int 3; + ; int 5; ==", 1)
+ testAccepts(t, "int 22; int 3; - ; int 19; ==", 1)
+ testAccepts(t, "int 8; int 7; * ; int 56; ==", 1)
+ testAccepts(t, "int 21; int 7; / ; int 3; ==", 1)
- testPanics(t, "int 1; int 2; - ;int 0; ==", 1)
+ testPanics(t, "int 1; int 2; - ; int 0; ==", 1)
}
func TestSha256EqArg(t *testing.T) {
@@ -896,6 +896,18 @@ func TestBytecTooFar(t *testing.T) {
testPanics(t, "byte 0x23; bytec_1; btoi", 1)
}
+func TestManualCBlockEval(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // TestManualCBlock in assembler_test.go demonstrates that these will use
+ // an inserted constant block.
+ testAccepts(t, "int 4; int 4; +; int 8; ==; return; intcblock 10", 2)
+ testAccepts(t, "b skip; intcblock 10; skip: int 4; int 4; +; int 8; ==;", 2)
+ testAccepts(t, "byte 0x2222; byte 0x2222; concat; len; int 4; ==; return; bytecblock 0x11", 2)
+ testAccepts(t, "b skip; bytecblock 0x11; skip: byte 0x2222; byte 0x2222; concat; len; int 4; ==", 2)
+}
+
func TestTxnBadField(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -962,7 +974,7 @@ func TestArg(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- source := "arg 0; arg 1; ==; arg 2; arg 3; !=; &&; arg 4; len; int 9; <; &&;"
+ source := "arg 0; arg 1; ==; arg 2; arg 3; !=; &&; arg 4; len; int 9; <; &&; "
if v >= 5 {
source += "int 0; args; int 1; args; ==; assert; int 2; args; int 3; args; !=; assert"
}
@@ -1065,7 +1077,7 @@ const globalV7TestProgram = globalV6TestProgram + `
`
const globalV8TestProgram = globalV7TestProgram + `
-// No new globals in v7
+// No new globals in v8
`
func TestGlobal(t *testing.T) {
@@ -1563,7 +1575,8 @@ assert
int 1
`
-const testTxnProgramTextV8 = testTxnProgramTextV7
+const testTxnProgramTextV8 = testTxnProgramTextV7 + `
+`
func makeSampleTxn() transactions.SignedTxn {
var txn transactions.SignedTxn
@@ -2836,16 +2849,16 @@ func TestSlowLogic(t *testing.T) {
t.Parallel()
fragment := `byte 0x666E6F7264; keccak256
- byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567; ==;`
+ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567; ==; `
// Sanity check. Running a short sequence of these fragments passes in all versions.
- source := fragment + strings.Repeat(fragment+"&&;", 5)
+ source := fragment + strings.Repeat(fragment+"&&; ", 5)
testAccepts(t, source, 1)
// in v1, each repeat costs 30
- v1overspend := fragment + strings.Repeat(fragment+"&&;", 20000/30)
+ v1overspend := fragment + strings.Repeat(fragment+"&&; ", 20000/30)
// in v2,v3 each repeat costs 134
- v2overspend := fragment + strings.Repeat(fragment+"&&;", 20000/134)
+ v2overspend := fragment + strings.Repeat(fragment+"&&; ", 20000/134)
// v1overspend fails (on v1)
ops := testProg(t, v1overspend, 1)
@@ -3546,8 +3559,7 @@ func benchmarkOperation(b *testing.B, prefix string, operation string, suffix st
b.Helper()
runs := 1 + b.N/2000
inst := strings.Count(operation, ";") + strings.Count(operation, "\n")
- source := prefix + ";" + strings.Repeat(operation+";", 2000) + ";" + suffix
- source = strings.ReplaceAll(source, ";", "\n")
+ source := prefix + ";" + strings.Repeat(operation+"\n", 2000) + ";" + suffix
ops := testProg(b, source, AssemblerMaxVersion)
evalLoop(b, runs, ops.Program)
b.ReportMetric(float64(inst), "extra/op")
@@ -3586,6 +3598,51 @@ func BenchmarkUintCmp(b *testing.B) {
})
}
}
+
+func BenchmarkDupnProto(b *testing.B) {
+ benches := [][]string{
+ {"dupn1", `
+ b main
+f:
+ proto 1 1
+ byte "repeat"
+ dupn 0 // return 1 string
+ retsub
+main:
+ int 777; dupn 0; // start with 1 int on stack
+`, "callsub f", "len"},
+ {"dupn10", `
+ b main
+f:
+ proto 10 10
+ byte "repeat"
+ dupn 9 // return 10 strings
+ retsub
+main:
+ int 777; dupn 9; // start with 10 ints on stack
+`, "callsub f", strings.Repeat("pop;", 9) + "len"},
+ {"dupn100", `
+ b main
+f:
+ proto 100 100
+ byte "repeat"
+ dupn 99 // return 100 strings
+ retsub
+main:
+ int 777; dupn 99; // start with 100 ints on stack
+`, "callsub f", strings.Repeat("pop;", 99) + "len"},
+ {"dp1", "int 777", "dupn 1; popn 1", ""},
+ {"dp10", "int 777", "dupn 10; popn 10", ""},
+ {"dp100", "int 777", "dupn 100; popn 100", ""},
+ }
+ for _, bench := range benches {
+ b.Run(bench[0], func(b *testing.B) {
+ b.ReportAllocs()
+ benchmarkOperation(b, bench[1], bench[2], bench[3])
+ })
+ }
+}
+
func BenchmarkByteLogic(b *testing.B) {
benches := [][]string{
{"b&", "", "byte 0x012345678901feab; byte 0x01ffffffffffffff; b&; pop", "int 1"},
@@ -3860,9 +3917,9 @@ func TestStackOverflow(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- source := "int 1; int 2;"
+ source := "int 1; int 2; "
for i := 1; i < maxStackDepth/2; i++ {
- source += "dup2;"
+ source += "dup2; "
}
testAccepts(t, source+"return", 2)
testPanics(t, source+"dup2; return", 2)
@@ -4193,7 +4250,7 @@ func notrack(program string) string {
return pragma + program
}
-type evalTester func(pass bool, err error) bool
+type evalTester func(t *testing.T, pass bool, err error) bool
func testEvaluation(t *testing.T, program string, introduced uint64, tester evalTester) error {
t.Helper()
@@ -4223,7 +4280,7 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
require.NoError(t, err)
ep = defaultEvalParamsWithVersion(&txn, lv)
pass, err := EvalSignature(0, ep)
- ok := tester(pass, err)
+ ok := tester(t, pass, err)
if !ok {
t.Log(ep.Trace.String())
t.Log(err)
@@ -4243,22 +4300,36 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
func testAccepts(t *testing.T, program string, introduced uint64) {
t.Helper()
- testEvaluation(t, program, introduced, func(pass bool, err error) bool {
+ testEvaluation(t, program, introduced, func(t *testing.T, pass bool, err error) bool {
return pass && err == nil
})
}
func testRejects(t *testing.T, program string, introduced uint64) {
t.Helper()
- testEvaluation(t, program, introduced, func(pass bool, err error) bool {
+ testEvaluation(t, program, introduced, func(t *testing.T, pass bool, err error) bool {
// Returned False, but didn't panic
return !pass && err == nil
})
}
-func testPanics(t *testing.T, program string, introduced uint64) error {
+func testPanics(t *testing.T, program string, introduced uint64, pattern ...string) error {
t.Helper()
- return testEvaluation(t, program, introduced, func(pass bool, err error) bool {
+ return testEvaluation(t, program, introduced, func(t *testing.T, pass bool, err error) bool {
+ t.Helper()
// TEAL panic! not just reject at exit
- return !pass && err != nil
+ if pass {
+ return false
+ }
+ if err == nil {
+ t.Log("program rejected rather panicked")
+ return false
+ }
+ for _, p := range pattern {
+ if !strings.Contains(err.Error(), p) {
+ t.Log(err, "does not contain", p)
+ return false
+ }
+ }
+ return true
})
}
@@ -4266,11 +4337,11 @@ func TestAssert(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testAccepts(t, "int 1;assert;int 1", 3)
- testRejects(t, "int 1;assert;int 0", 3)
- testPanics(t, "int 0;assert;int 1", 3)
- testPanics(t, notrack("assert;int 1"), 3)
- testPanics(t, notrack(`byte "john";assert;int 1`), 3)
+ testAccepts(t, "int 1; assert; int 1", 3)
+ testRejects(t, "int 1; assert; int 0", 3)
+ testPanics(t, "int 0; assert; int 1", 3)
+ testPanics(t, notrack("assert; int 1"), 3)
+ testPanics(t, notrack(`byte "john"; assert; int 1`), 3)
}
func TestBits(t *testing.T) {
@@ -4378,6 +4449,29 @@ func TestDig(t *testing.T) {
testPanics(t, notrack("int 3; int 2; int 1; dig 11; int 2; ==; return"), 3)
}
+func TestBury(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // bury 0 panics
+ source := "int 3; int 2; int 7; bury 0; int 1; return"
+ testProg(t, source, 8, Expect{1, "bury 0 always fails"})
+ testPanics(t, notrack("int 3; int 2; int 7; bury 0; int 1; return"), 8, "bury outside stack")
+
+ // bury 1 pops the ToS and replaces the thing "1 down", which becomes the new ToS
+ testAccepts(t, "int 3; int 2; int 7; bury 1; int 7; ==; assert; int 3; ==", 8)
+
+ // bury 2
+ testAccepts(t, `int 3; int 2; int 7;
+ bury 2;
+ int 2; ==; assert
+ int 7; ==;
+`, 8)
+
+ // bury too deep
+ testPanics(t, notrack("int 3; int 2; int 7; bury 3; int 1; return"), 8, "bury outside stack")
+}
+
func TestCover(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -4766,7 +4860,7 @@ func TestLog(t *testing.T) {
loglen: 2,
},
{
- source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, maxLogCalls)),
+ source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log; `, maxLogCalls)),
loglen: maxLogCalls,
},
{
@@ -4811,7 +4905,7 @@ func TestLog(t *testing.T) {
runMode: modeApp,
},
{
- source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, maxLogCalls+1)),
+ source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log; `, maxLogCalls+1)),
errContains: "too many log calls",
runMode: modeApp,
},
@@ -5123,7 +5217,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5131,7 +5225,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615; //max uint64 value
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5139,7 +5233,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "algo";
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"\\u0061\\u006C\\u0067\\u006F\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5147,7 +5241,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "algo";
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5159,7 +5253,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
int 10
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5169,7 +5263,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "teal"
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"\\"teal\\"\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5179,7 +5273,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte ""teal"" // quotes match
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \" teal \", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5189,7 +5283,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte " teal " // spaces match
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10, \"key40\": \"10\"}}, \"key5\": 18446744073709551615 }";
@@ -5200,7 +5294,7 @@ func TestOpJSONRef(t *testing.T) {
byte "{\"key40\": 10, \"key40\": \"10\"}"
==
`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5208,7 +5302,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "{\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"}" // object as it appeared in input
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientD\\u0061taJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5216,7 +5310,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "{\"attestationObject\": \"based64url_encoded_buffer\",\"clientD\\u0061taJSON\": \" based64url_encoded_client_data\"}" // object as it appeared in input
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5226,7 +5320,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte " based64url_encoded_client_data";
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"\\u0072\\u0061\\u0077\\u0049\\u0044\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5234,7 +5328,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "responseId"
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// JavaScript MAX_SAFE_INTEGER
{
@@ -5243,7 +5337,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 9007199254740991;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// maximum uint64
{
@@ -5252,7 +5346,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// larger-than-uint64s are allowed if not requested
{
@@ -5261,7 +5355,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
}
@@ -5301,52 +5395,52 @@ func TestOpJSONRef(t *testing.T) {
{
source: `byte "{\"key0\": 1 }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal number into Go value of type string",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": [1] }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal array into Go value of type string",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": {\"key1\":1} }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal object into Go value of type string",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": \"1\" }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal string into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": [\"1\"] }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal array into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": {\"key1\":1} }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal object into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": [1]}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal array into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal number into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": \"1\"}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal string into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": [1,2,3]} }"; byte "key3"; json_ref JSONString;`,
error: "key key3 not found in JSON text",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": [1,2,3]}}";
@@ -5356,52 +5450,52 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString
`,
error: "key key5 not found in JSON text",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": -0,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number -0 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1e10,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 1e10 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0.2e-2,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 0.2e-2 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 1.0 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key1"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 2.5 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key2"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number -3 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 18446744073709551616}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1,}"; byte "key0"; json_ref JSONString;`,
error: "error while parsing JSON text, invalid json text",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1, \"key0\": \"3\"}"; byte "key0"; json_ref JSONString;`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10, \"key40\": \"should fail!\"}}}";
@@ -5413,7 +5507,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString
`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {13, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}, {7, "unknown opcode: json_ref"}},
},
{
source: `byte "[1,2,3]";
@@ -5421,7 +5515,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "2";
@@ -5429,7 +5523,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "null";
@@ -5437,7 +5531,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "true";
@@ -5445,7 +5539,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "\"sometext\"";
@@ -5453,7 +5547,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{noquotes: \"shouldn't work\"}";
@@ -5462,7 +5556,7 @@ func TestOpJSONRef(t *testing.T) {
byte "shouldn't work";
==`,
error: "error while parsing JSON text, invalid json text",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// max uint64 + 1 should fail
{
@@ -5472,7 +5566,7 @@ func TestOpJSONRef(t *testing.T) {
int 1;
return`,
error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
}
@@ -5510,6 +5604,90 @@ func TestOpJSONRef(t *testing.T) {
}
func TestTypeComplaints(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
testProg(t, "err; store 0", AssemblerMaxVersion)
testProg(t, "int 1; return; store 0", AssemblerMaxVersion)
}
+
+func TestSwitchInt(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // take the 0th label
+ testAccepts(t, `
+int 0
+switch zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 1th label
+ testRejects(t, `
+int 1
+switch zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // same, but jumping to end of program
+ testAccepts(t, `
+int 1; dup
+switch zero one
+zero: err
+one:
+`, 8)
+
+ // no match
+ testAccepts(t, `
+int 2
+switch zero one
+int 1; return // falls through to here
+zero: int 0; return
+one: int 0; return
+`, 8)
+
+ // jump forward and backward
+ testAccepts(t, `
+int 0
+start:
+int 1
++
+dup
+int 1
+-
+switch start end
+err
+end:
+int 2
+==
+assert
+int 1
+`, 8)
+
+ // 0 labels are allowed, but weird!
+ testAccepts(t, `
+int 0
+switch
+int 1
+`, 8)
+
+ testPanics(t, notrack("switch; int 1"), 8)
+
+ // make the switch the final instruction
+ testAccepts(t, `
+int 1
+int 0
+switch done1 done2; done1: ; done2: ;
+`, 8)
+
+ // make the switch the final instruction, and don't match
+ testAccepts(t, `
+int 1
+int 88
+switch done1 done2; done1: ; done2: ;
+`, 8)
+}
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index 0a2928813..2b5008f5c 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -225,7 +225,7 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
ep, _, _ := makeSampleEnv()
ep.Proto.LogicSigVersion = v
if field.version > v {
- testProg(t, text, v, Expect{3, "...was introduced in..."})
+ testProg(t, text, v, Expect{1, "...was introduced in..."})
ops := testProg(t, text, field.version) // assemble in the future
ops.Program[0] = byte(v)
testAppBytes(t, ops.Program, ep, "invalid asset_params_get field")
diff --git a/data/transactions/logic/frames.go b/data/transactions/logic/frames.go
new file mode 100644
index 000000000..e145ac8fc
--- /dev/null
+++ b/data/transactions/logic/frames.go
@@ -0,0 +1,128 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "errors"
+ "fmt"
+)
+
+func opProto(cx *EvalContext) error {
+ if !cx.fromCallsub {
+ return fmt.Errorf("proto was executed without a callsub")
+ }
+ cx.fromCallsub = false
+ nargs := int(cx.program[cx.pc+1])
+ if nargs > len(cx.stack) {
+ return fmt.Errorf("callsub to proto that requires %d args with stack height %d", nargs, len(cx.stack))
+ }
+ top := len(cx.callstack) - 1
+ cx.callstack[top].clear = true
+ cx.callstack[top].args = nargs
+ cx.callstack[top].returns = int(cx.program[cx.pc+2])
+ return nil
+}
+
+func opFrameDig(cx *EvalContext) error {
+ i := int8(cx.program[cx.pc+1])
+
+ top := len(cx.callstack) - 1
+ if top < 0 {
+ return errors.New("frame_dig with empty callstack")
+ }
+
+ frame := cx.callstack[top]
+ // If proto was used, don't allow `frame_dig` to go below specified args
+ if frame.clear && -int(i) > frame.args {
+ return fmt.Errorf("frame_dig %d in sub with %d args", i, frame.args)
+ }
+ idx := frame.height + int(i)
+ if idx >= len(cx.stack) {
+ return errors.New("frame_dig above stack")
+ }
+ if idx < 0 {
+ return errors.New("frame_dig below stack")
+ }
+
+ cx.stack = append(cx.stack, cx.stack[idx])
+ return nil
+}
+func opFrameBury(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // value
+ i := int8(cx.program[cx.pc+1])
+
+ top := len(cx.callstack) - 1
+ if top < 0 {
+ return errors.New("frame_bury with empty callstack")
+ }
+
+ frame := cx.callstack[top]
+ // If proto was used, don't allow `frame_bury` to go below specified args
+ if frame.clear && -int(i) > frame.args {
+ return fmt.Errorf("frame_bury %d in sub with %d args", i, frame.args)
+ }
+ idx := frame.height + int(i)
+ if idx >= last {
+ return errors.New("frame_bury above stack")
+ }
+ if idx < 0 {
+ return errors.New("frame_bury below stack")
+ }
+ cx.stack[idx] = cx.stack[last]
+ cx.stack = cx.stack[:last] // pop value
+ return nil
+}
+func opBury(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // value
+ i := int(cx.program[cx.pc+1])
+
+ idx := last - i
+ if idx < 0 || idx == last {
+ return errors.New("bury outside stack")
+ }
+ cx.stack[idx] = cx.stack[last]
+ cx.stack = cx.stack[:last] // pop value
+ return nil
+}
+
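
An illustrative sketch (not part of the diff) of bury's index math, mirroring the TestBury cases below: bury N overwrites the value N below the top with the top value, then pops:

    package main

    import "fmt"

    func main() {
        stack := []int{3, 2, 7} // as in "int 3; int 2; int 7; bury 2"
        n, last := 2, len(stack)-1
        stack[last-n] = stack[last] // 7 overwrites 3
        stack = stack[:last]
        fmt.Println(stack) // [7 2]
    }
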
+func opPopN(cx *EvalContext) error {
+ n := cx.program[cx.pc+1]
+ top := len(cx.stack) - int(n)
+ if top < 0 {
+ return fmt.Errorf("popn %d while stack contains %d", n, len(cx.stack))
+ }
+ cx.stack = cx.stack[:top] // pop value
+ return nil
+}
+
+func opDupN(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // value
+
+ n := int(cx.program[cx.pc+1])
+ finalLen := len(cx.stack) + n
+ if cap(cx.stack) < finalLen {
+ // Let's grow all at once, plus a little slack.
+ newStack := make([]stackValue, len(cx.stack), finalLen+4)
+ copy(newStack, cx.stack)
+ cx.stack = newStack
+ }
+ for i := 0; i < n; i++ {
+ // There will be enough room that this will not allocate
+ cx.stack = append(cx.stack, cx.stack[last])
+ }
+ return nil
+}
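
An illustrative model (not part of the diff, not go-algorand API) of the frame-relative addressing frame_dig and frame_bury implement: slots are measured from the stack height recorded at callsub, so negative slots reach the caller's args and non-negative slots reach locals pushed after entry:

    package main

    import "fmt"

    func main() {
        stack := []string{"arg -2", "arg -1", "local 0", "local 1"}
        height := 2 // stack height when callsub ran
        dig := func(i int8) string { return stack[height+int(i)] }
        fmt.Println(dig(-1)) // arg -1   (frame_dig -1)
        fmt.Println(dig(0))  // local 0  (frame_dig 0)
    }
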
diff --git a/data/transactions/logic/frames_test.go b/data/transactions/logic/frames_test.go
new file mode 100644
index 000000000..f1c1780c3
--- /dev/null
+++ b/data/transactions/logic/frames_test.go
@@ -0,0 +1,496 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+const frameNonsense = `
+ return // proto subs must appear in deadcode
+ double:
+ proto 1 2
+ frame_dig -1
+ int 2
+ *
+ frame_bury 0
+ retsub
+ pushint 2
+ popn 1
+ dupn 4
+ bury 9
+`
+
+const frameCompiled = "438a01028bff240b8c00898102460147044509"
+
+func TestDupPopN(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // These two are equally dumb uses of popn, and should perhaps be banned
+ testAccepts(t, "int 1; popn 0", fpVersion)
+ testAccepts(t, "int 1; dup; popn 1;", fpVersion)
+
+ testAccepts(t, "int 1; int 1; int 1; popn 2", fpVersion)
+ testAccepts(t, "int 1; int 0; popn 1", fpVersion)
+ testPanics(t, "int 1; int 0; popn 2", fpVersion)
+ testProg(t, "int 1; int 0; popn 3", LogicVersion, Expect{1, "popn 3 expects 3..."})
+ testPanics(t, notrack("int 1; int 0; popn 3"), fpVersion)
+
+ testAccepts(t, `int 7; dupn 250; dupn 250; dupn 250; dupn 249;
+ popn 250; popn 250; popn 250; popn 249; int 7; ==`,
+ fpVersion)
+ // We could detect this in assembler if we checked pgm.stack > maxStackDepth
+ // at each step. But it seems vanishingly unlikely to have a detectable
+ // instance of this bug in real code.
+ testPanics(t, `int 1; dupn 250; dupn 250; dupn 250; dupn 250
+ popn 250; popn 250; popn 250; popn 250; !`,
+ fpVersion, "stack overflow")
+}
+
+func TestDupPopNTyping(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testProg(t, "int 8; dupn 2; +; pop", LogicVersion)
+ testProg(t, "int 8; dupn 2; concat; pop", LogicVersion, Expect{1, "...wanted type []byte..."})
+
+ testProg(t, "popn 1", LogicVersion, Expect{1, "...expects 1 stack argument..."})
+}
+
+func TestSimpleFrame(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `
+ int 3
+ int 4
+ callsub hyp
+ int 5
+ ==
+ return
+ hyp:
+ proto 2 1
+ dupn 1 // room for the return value
+ frame_dig -1
+ frame_dig -1
+ *
+ frame_dig -2
+ frame_dig -2
+ *
+ +
+ sqrt
+ frame_bury 0 // place return value
+ retsub
+`, fpVersion)
+}
+
+func TestProtoChecks(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ // We normally report a non-deadcode `proto` at assembly time. But it still
+ // must fail at evaluation.
+ testPanics(t, notrack("proto 0 0; int 1"), fpVersion, "proto was executed without a callsub")
+ testAccepts(t, "callsub a; a: proto 0 0; int 1", fpVersion)
+
+ // the assembler could detect this, since we know stack height, but it's
+ // rare to KNOW the height, and hard to get the knowledge to the right place
+ testPanics(t, `
+ callsub toodeep
+toodeep:
+ proto 10 1
+ int 1
+ return
+`, fpVersion, "callsub to proto that requires 10 args")
+
+ // the assembler could detect this, since sub is one basic block
+ testPanics(t, `
+ int 5; int 10; callsub eatsargs
+ int 1; return
+eatsargs:
+ proto 2 1
+ +
+ retsub
+`, fpVersion, "retsub executed with stack below frame")
+
+ // the assembler could detect this, since sub is one basic block
+ testPanics(t, `
+ int 5; int 10; callsub donothing
+ int 1; return
+donothing: // does not leave return value above args
+ proto 2 1
+ retsub
+`, fpVersion, "retsub executed with no return values on stack")
+
+ // the assembler could detect this, since sub is one basic block
+ testPanics(t, `
+ int 5; int 10; callsub only1
+ int 1; return
+only1: // leaves only 1 return val
+ proto 2 2
+ dup2; +
+ retsub
+`, fpVersion, "retsub executed with 1 return values on stack")
+
+ testAccepts(t, `
+ int 5; int 10; callsub fine
+ int 1; return
+fine:
+ proto 2 2
+ dup2
+ retsub
+`, fpVersion)
+
+ testAccepts(t, `
+ int 5; int 10; callsub extra
+ int 1; return
+extra:
+ proto 2 2
+ dup2; dup2
+ retsub
+`, fpVersion)
+
+ // the assembler could potentially complain about the stack going below fp,
+ // since the sub is one basic block.
+ testAccepts(t, `
+ int 10
+ int 20
+ callsub main
+ int 1; return
+main:
+ proto 2 1
+ + // This consumes the top arg. We could complain in assembly if we checked stack height against pgm.fp
+ dup; dup // But the dup;dup restores it, so it _evals_ fine.
+ retsub
+`, AssemblerMaxVersion)
+
+}
+
+func TestVoidSub(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `
+ b main
+ a: proto 0 0
+ int 4 // junk local should get cleared
+ retsub
+ main: callsub a
+ int 1 // would fail because of two stack items unless 4 cleared
+`, fpVersion)
+
+ testPanics(t, `
+ b main
+ a: int 4 // junk local should not get cleared (no "proto")
+ retsub
+ main: callsub a
+ int 1 // fails because two items on stack
+`, 4) // test back to retsub introduction
+}
+
+func TestForgetReturn(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `
+ b main
+ a: proto 0 1
+ int 5 // Just placing on stack is a fine way to return
+ retsub
+ main: callsub a
+ int 5
+ ==
+`, fpVersion)
+
+ testPanics(t, `
+ b main
+ a: proto 0 1
+ // Oops. No return value
+ retsub
+ main: callsub a
+ !
+`, fpVersion, "retsub executed with no return values on stack")
+
+ testPanics(t, `
+ b main
+ a: proto 0 3
+ int 1; int 2 // only 2. need 3
+ retsub
+ main: callsub a
+ !
+`, fpVersion, "retsub executed with 2 return values on stack")
+
+ // Extra is fine. They are "locals", and they are cleared
+ testAccepts(t, `
+ b main
+ a: proto 0 3
+ int 7; dupn 3 // height grows by 4. only needed 3
+ retsub
+ main: callsub a // returns 3 7s
+ +; +; int 21; ==
+`, fpVersion)
+}
+
+func TestFrameAccess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `
+ b main
+ add: proto 2 1
+ frame_dig -1
+ frame_dig -2
+ +
+ retsub
+ main: int 8
+ int 2
+ callsub add
+ int 10
+ ==
+`, fpVersion)
+
+ testAccepts(t, `
+ b main
+ ijsum:
+ proto 2 1
+ int 0; int 0 // room for sum and one "local", a loop variable
+
+ frame_dig -2 // first arg
+ frame_bury 1 // initialize loop var
+ loop:
+ // test for loop exit
+ frame_dig 1 // loop var
+ frame_dig -1 // second arg
+ >
+ bnz break
+
+ // add loop var into sum
+ frame_dig 1
+ frame_dig 0 // the sum, to be returned
+ +
+ frame_bury 0
+
+ // inc the loop var
+ frame_dig 1
+ int 1
+ +
+ frame_bury 1
+ b loop
+ break:
+ retsub // sum is sitting in frame_dig 0, which will end up ToS
+
+ main: int 2
+ int 8
+ callsub ijsum
+ int 35 // 2+3+4+5+6+7+8
+ ==
+`, fpVersion)
+
+ testPanics(t, notrack(`
+ b main
+ add: proto 2 1
+ frame_dig -1
+ frame_dig -3
+ +
+ retsub
+ main: int 8
+ int 2
+ callsub add
+ int 10
+ ==
+`), fpVersion, "frame_dig -3 in sub with 2")
+
+ testPanics(t, notrack(`
+ b main
+ add: proto 2 1
+ frame_dig -1
+ int 5
+ frame_bury -3
+ +
+ retsub
+ main: int 8
+ int 2
+ callsub add
+ int 10
+ ==
+`), fpVersion, "frame_bury -3 in sub with 2")
+
+ source := `
+ b main
+ add: proto 2 1
+ frame_dig 0 // return slot. but wasn't allocated
+ retsub
+ main: int 8
+ int 2
+ callsub add
+ int 1
+ return
+`
+ testProg(t, source, fpVersion, Expect{4, "frame_dig above stack"})
+ testPanics(t, notrack(source), fpVersion, "frame_dig above stack")
+
+ source = `
+ b main
+ add: proto 2 1
+ int 0; dupn 2 // allocate return slot plus two locals
+ frame_dig 3 // but look beyond
+ retsub
+ main: int 8
+ int 2
+ callsub add
+ int 1
+ return
+`
+ testProg(t, source, fpVersion, Expect{5, "frame_dig above stack"})
+ testPanics(t, notrack(source), fpVersion, "frame_dig above stack")
+
+ // Note that at the moment of frame_bury, the stack IS big enough, because
+ // the 4 would essentially be written over itself. But because frame_bury
+ // pops, we consider this to be beyond the stack.
+ source = `
+ b main
+ add: proto 2 1
+ int 0; dupn 2 // allocate return slot plus two locals
+ int 4
+ frame_bury 3 // but put "beyond"
+ retsub
+ main: int 8
+ int 2
+ callsub add
+ int 1
+ return
+`
+ testProg(t, source, fpVersion, Expect{6, "frame_bury above stack"})
+ testPanics(t, notrack(source), fpVersion, "frame_bury above stack")
+}
+
+func TestFrameAccessAtStart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testPanics(t, "frame_dig 1", fpVersion, "frame_dig with empty callstack")
+ testPanics(t, "int 7; frame_bury 1", fpVersion, "frame_bury with empty callstack")
+}
+
+func TestFrameAccessAboveStack(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+ int 1
+ callsub main
+main:
+ proto 1 1
+ pop // argument popped
+ frame_dig -1 // but then frame_dig used to get at it
+`
+ testProg(t, source, fpVersion, Expect{7, "frame_dig above stack"})
+ testPanics(t, notrack(source), fpVersion, "frame_dig above stack")
+
+ testAccepts(t, `
+ int 2
+ callsub main
+ int 1; ==; return
+main:
+ proto 1 1
+ int 7
+ frame_dig 0; int 7; ==;
+ frame_bury 0;
+ retsub
+`, fpVersion)
+
+ // almost the same but try to use a "local" slot without pushing first
+ source = `
+ int 2
+ callsub main
+ int 1; ==; return
+main:
+ proto 1 1
+ int 7
+ frame_dig 1; int 7; ==;
+ frame_bury 1;
+ retsub
+`
+ testProg(t, source, fpVersion, Expect{8, "frame_dig above stack"})
+ testPanics(t, notrack(source), fpVersion)
+}
+
+func TestFrameAccessBelowStack(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+ int 1
+ callsub main
+main:
+ proto 1 1
+ frame_dig -10 // digging down below arguments
+`
+ testProg(t, source, fpVersion, Expect{6, "frame_dig -10 in sub with 1 arg..."})
+ testPanics(t, notrack(source), fpVersion, "frame_dig -10 in sub with 1 arg")
+
+ testPanics(t, `
+ int 1
+ callsub main
+main:
+ frame_dig -10 // digging down below arguments
+`, fpVersion, "frame_dig below stack")
+
+ source = `
+ int 1
+ callsub main
+main:
+ proto 1 15
+ frame_bury -10 // burying down below arguments
+`
+ testProg(t, source, fpVersion, Expect{6, "frame_bury -10 in sub with 1 arg..."})
+ testPanics(t, notrack(source), fpVersion, "frame_bury -10 in sub with 1 arg")
+
+ // Without `proto`, frame_bury can't be checked by assembler, but still panics
+ source = `
+ int 1
+ callsub main
+main:
+ frame_bury -10 // burying down below arguments
+`
+ testPanics(t, source, fpVersion, "frame_bury below stack")
+
+}
+
+// TestDirectDig is an example of using dig instead of frame_dig; note that
+// the offset needs to account for the added stack height of the second call.
+func TestDirectDig(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+ int 3
+ int 5
+ callsub double_both
+ +
+ int 16; ==; return
+double_both:
+ proto 2 2
+ dig 1; int 2; * // dig for first arg
+ dig 1; int 2; * // dig for second
+ retsub
+`
+ testProg(t, source, fpVersion)
+ testAccepts(t, source, fpVersion)
+}
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index 4e29b9a88..f4742f1b7 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1,5 +1,5 @@
{
- "EvalMaxVersion": 7,
+ "EvalMaxVersion": 8,
"LogicSigVersion": 7,
"Ops": [
{
@@ -1029,6 +1029,38 @@
]
},
{
+ "Opcode": 69,
+ "Name": "bury",
+ "Args": ".",
+ "Size": 2,
+ "Doc": "Replace the Nth value from the top of the stack. bury 0 fails.",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 70,
+ "Name": "popn",
+ "Size": 2,
+ "Doc": "Remove N values from the top of the stack",
+ "ImmediateNote": "{uint8 stack depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 71,
+ "Name": "dupn",
+ "Args": ".",
+ "Size": 2,
+ "Doc": "duplicate A, N times",
+ "ImmediateNote": "{uint8 copy count}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
"Opcode": 72,
"Name": "pop",
"Args": ".",
@@ -1299,7 +1331,7 @@
"Size": 2,
"Doc": "key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A",
"DocExtra": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
- "ImmediateNote": "{string return type}",
+ "ImmediateNote": "{uint8 return type}",
"Groups": [
"Byte Array Manipulation"
]
@@ -1560,7 +1592,7 @@
"Name": "callsub",
"Size": 3,
"Doc": "branch unconditionally to TARGET, saving the next instruction on the call stack",
- "DocExtra": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "DocExtra": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.",
"ImmediateNote": "{int16 branch offset, big-endian}",
"Groups": [
"Flow Control"
@@ -1571,7 +1603,51 @@
"Name": "retsub",
"Size": 1,
"Doc": "pop the top instruction from the call stack and branch to it",
- "DocExtra": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "DocExtra": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 138,
+ "Name": "proto",
+ "Size": 3,
+ "Doc": "Prepare top call frame for a retsub that will assume A args and R return values.",
+ "DocExtra": "Fails unless the last instruction executed was a `callsub`.",
+ "ImmediateNote": "{uint8 arguments} {uint8 return values}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 139,
+ "Name": "frame_dig",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Nth (signed) value from the frame pointer.",
+ "ImmediateNote": "{int8 frame slot}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 140,
+ "Name": "frame_bury",
+ "Args": ".",
+ "Size": 2,
+ "Doc": "Replace the Nth (signed) value from the frame pointer in the stack",
+ "ImmediateNote": "{int8 frame slot}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 141,
+ "Name": "switch",
+ "Args": "U",
+ "Size": 0,
+ "Doc": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
+ "ImmediateNote": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
"Groups": [
"Flow Control"
]
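The new `switch` entry is clearer next to a program sketch. Its immediate is a label list (`detSwitch` below gives it `imm("target ...", immLabels)`), so the assembler-level form is presumably `switch label0 label1 ...` with a 0-indexed selector taken from the stack. A hedged sketch, not from the patch, reusing the test helpers above:

    func TestSwitchSketch(t *testing.T) {
        partitiontest.PartitionTest(t)
        t.Parallel()

        // int 1 selects the second (0-indexed) target; the err line is
        // reached only when the index exceeds the label count.
        source := `
     int 1
     switch lab0 lab1
     err
    lab0:
     int 0; return
    lab1:
     int 1; return
    `
        testProg(t, source, fpVersion)
        testAccepts(t, source, fpVersion)
    }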
@@ -2297,7 +2373,7 @@
"Args": "U",
"Returns": ".",
"Size": 2,
- "Doc": "field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)",
+ "Doc": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
"ImmediateNote": "{uint8 block field}",
"Groups": [
"State Access"
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index dc5627422..1efab93be 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -64,11 +64,12 @@ const appAddressAvailableVersion = 7
const fidoVersion = 7 // base64, json, secp256r1
const randomnessVersion = 7 // vrf_verify, block
+const fpVersion = 8 // changes for frame pointers and simpler function discipline
// EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is
// moved from vFuture to a new consensus version. If they remain unready, bump
// their version, and fixup TestAssemble() in assembler_test.go.
-const pairingVersion = 8 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
+const pairingVersion = 9 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
type linearCost struct {
baseCost int
@@ -122,6 +123,8 @@ type OpDetails struct {
FullCost linearCost // if non-zero, the cost of the opcode, no immediates matter
Size int // if non-zero, the known size of opcode. if 0, check() determines.
Immediates []immediate // details of each immediate arg to opcode
+
+ trusted bool // if `trusted`, don't check stack effects. They are more complicated than simply checking the opcode prototype.
}
func (d *OpDetails) docCost(argLen int) string {
@@ -167,16 +170,16 @@ func (d *OpDetails) Cost(program []byte, pc int, stack []stackValue) int {
return cost
}
-func opDefault() OpDetails {
- return OpDetails{asmDefault, nil, nil, modeAny, linearCost{baseCost: 1}, 1, nil}
+func detDefault() OpDetails {
+ return OpDetails{asmDefault, nil, nil, modeAny, linearCost{baseCost: 1}, 1, nil, false}
}
func constants(asm asmFunc, checker checkFunc, name string, kind immKind) OpDetails {
- return OpDetails{asm, checker, nil, modeAny, linearCost{baseCost: 1}, 0, []immediate{imm(name, kind)}}
+ return OpDetails{asm, checker, nil, modeAny, linearCost{baseCost: 1}, 0, []immediate{imm(name, kind)}, false}
}
-func opBranch() OpDetails {
- d := opDefault()
+func detBranch() OpDetails {
+ d := detDefault()
d.asm = asmBranch
d.check = checkBranch
d.Size = 3
@@ -184,60 +187,79 @@ func opBranch() OpDetails {
return d
}
+func detSwitch() OpDetails {
+ d := detDefault()
+ d.asm = asmSwitch
+ d.check = checkSwitch
+ d.Size = 0
+ d.Immediates = []immediate{imm("target ...", immLabels)}
+ return d
+}
+
func assembler(asm asmFunc) OpDetails {
- d := opDefault()
+ d := detDefault()
d.asm = asm
return d
}
func (d OpDetails) assembler(asm asmFunc) OpDetails {
- clone := d
- clone.asm = asm
- return clone
+ d.asm = asm
+ return d
}
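The rename from opDefault to detDefault comes with a second mechanical change: the chaining helpers drop their explicit `clone := d` bodies and mutate the value receiver directly. The two are equivalent because a value receiver is already a copy. A standalone illustration of the idiom (simplified one-field struct, not the real OpDetails):

    package main

    import "fmt"

    type details struct{ size int }

    // only mutates its value receiver: d is already a copy of the caller's
    // struct, so returning the mutated copy leaves the original untouched.
    // (The copy is shallow; slice fields would still share backing arrays.)
    func (d details) only(size int) details {
        d.size = size
        return d
    }

    func main() {
        base := details{size: 1}
        derived := base.only(3)
        fmt.Println(base.size, derived.size) // prints: 1 3
    }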
func costly(cost int) OpDetails {
- d := opDefault()
+ d := detDefault()
d.FullCost.baseCost = cost
return d
}
func (d OpDetails) costs(cost int) OpDetails {
- clone := d
- clone.FullCost = linearCost{baseCost: cost}
- return clone
+ d.FullCost = linearCost{baseCost: cost}
+ return d
}
func only(m runMode) OpDetails {
- d := opDefault()
+ d := detDefault()
d.Modes = m
return d
}
func (d OpDetails) only(m runMode) OpDetails {
- clone := d
- clone.Modes = m
- return clone
+ d.Modes = m
+ return d
}
func (d OpDetails) costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
- clone := d
- clone.FullCost = costByLength(initial, perChunk, chunkSize, depth).FullCost
- return clone
+ d.FullCost = costByLength(initial, perChunk, chunkSize, depth).FullCost
+ return d
}
func immediates(names ...string) OpDetails {
- d := opDefault()
+ return immKinded(immByte, names...)
+}
+
+func (d OpDetails) trust() OpDetails {
+ d.trusted = true
+ return d
+}
+
+func immKinded(kind immKind, names ...string) OpDetails {
+ d := detDefault()
d.Size = len(names) + 1
d.Immediates = make([]immediate, len(names))
for i, name := range names {
- d.Immediates[i] = imm(name, immByte)
+ d.Immediates[i] = imm(name, kind)
}
return d
}
-func stacky(typer refineFunc, imms ...string) OpDetails {
- d := immediates(imms...)
+func typed(typer refineFunc) OpDetails {
+ d := detDefault()
+ d.refine = typer
+ return d
+}
+
+func (d OpDetails) typed(typer refineFunc) OpDetails {
d.refine = typer
return d
}
@@ -273,7 +295,7 @@ func costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize {
panic("bad cost configuration")
}
- d := opDefault()
+ d := detDefault()
d.FullCost = linearCost{initial, perChunk, chunkSize, depth}
return d
}
@@ -283,11 +305,13 @@ type immKind byte
const (
immByte immKind = iota
+ immInt8
immLabel
immInt
immBytes
immInts
immBytess // "ss" not a typo. Multiple "bytes"
+ immLabels
)
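immInt8 is the first signed immediate kind; frame_dig and frame_bury need it to reach below the frame pointer. Decoding is a one-byte sign extension. A minimal sketch (hypothetical helper, not the evaluator's actual code):

    package main

    import "fmt"

    // int8Imm reinterprets a one-byte immediate as signed, so 0xff
    // decodes to -1: one slot below the frame pointer.
    func int8Imm(program []byte, pc int) int {
        return int(int8(program[pc+1]))
    }

    func main() {
        prog := []byte{0x8b, 0xff} // frame_dig -1
        fmt.Println(int8Imm(prog, 0)) // prints: -1
    }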
type immediate struct {
@@ -369,7 +393,7 @@ func (spec *OpSpec) deadens() bool {
// Note: assembly can specialize an Any return type if known at
// assembly-time, with ops.returns()
var OpSpecs = []OpSpec{
- {0x00, "err", opErr, proto(":x"), 1, opDefault()},
+ {0x00, "err", opErr, proto(":x"), 1, detDefault()},
{0x01, "sha256", opSHA256, proto("b:b"), 1, costly(7)},
{0x02, "keccak256", opKeccak256, proto("b:b"), 1, costly(26)},
{0x03, "sha512_256", opSHA512_256, proto("b:b"), 1, costly(9)},
@@ -399,43 +423,43 @@ var OpSpecs = []OpSpec{
{0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, proto("b:bb"), 5, costByField("v", &EcdsaCurves, ecdsaDecompressCosts)},
{0x07, "ecdsa_pk_recover", opEcdsaPkRecover, proto("bibb:bb"), 5, field("v", &EcdsaCurves).costs(2000)},
- {0x08, "+", opPlus, proto("ii:i"), 1, opDefault()},
- {0x09, "-", opMinus, proto("ii:i"), 1, opDefault()},
- {0x0a, "/", opDiv, proto("ii:i"), 1, opDefault()},
- {0x0b, "*", opMul, proto("ii:i"), 1, opDefault()},
- {0x0c, "<", opLt, proto("ii:i"), 1, opDefault()},
- {0x0d, ">", opGt, proto("ii:i"), 1, opDefault()},
- {0x0e, "<=", opLe, proto("ii:i"), 1, opDefault()},
- {0x0f, ">=", opGe, proto("ii:i"), 1, opDefault()},
- {0x10, "&&", opAnd, proto("ii:i"), 1, opDefault()},
- {0x11, "||", opOr, proto("ii:i"), 1, opDefault()},
- {0x12, "==", opEq, proto("aa:i"), 1, stacky(typeEquals)},
- {0x13, "!=", opNeq, proto("aa:i"), 1, stacky(typeEquals)},
- {0x14, "!", opNot, proto("i:i"), 1, opDefault()},
- {0x15, "len", opLen, proto("b:i"), 1, opDefault()},
- {0x16, "itob", opItob, proto("i:b"), 1, opDefault()},
- {0x17, "btoi", opBtoi, proto("b:i"), 1, opDefault()},
- {0x18, "%", opModulo, proto("ii:i"), 1, opDefault()},
- {0x19, "|", opBitOr, proto("ii:i"), 1, opDefault()},
- {0x1a, "&", opBitAnd, proto("ii:i"), 1, opDefault()},
- {0x1b, "^", opBitXor, proto("ii:i"), 1, opDefault()},
- {0x1c, "~", opBitNot, proto("i:i"), 1, opDefault()},
- {0x1d, "mulw", opMulw, proto("ii:ii"), 1, opDefault()},
- {0x1e, "addw", opAddw, proto("ii:ii"), 2, opDefault()},
+ {0x08, "+", opPlus, proto("ii:i"), 1, detDefault()},
+ {0x09, "-", opMinus, proto("ii:i"), 1, detDefault()},
+ {0x0a, "/", opDiv, proto("ii:i"), 1, detDefault()},
+ {0x0b, "*", opMul, proto("ii:i"), 1, detDefault()},
+ {0x0c, "<", opLt, proto("ii:i"), 1, detDefault()},
+ {0x0d, ">", opGt, proto("ii:i"), 1, detDefault()},
+ {0x0e, "<=", opLe, proto("ii:i"), 1, detDefault()},
+ {0x0f, ">=", opGe, proto("ii:i"), 1, detDefault()},
+ {0x10, "&&", opAnd, proto("ii:i"), 1, detDefault()},
+ {0x11, "||", opOr, proto("ii:i"), 1, detDefault()},
+ {0x12, "==", opEq, proto("aa:i"), 1, typed(typeEquals)},
+ {0x13, "!=", opNeq, proto("aa:i"), 1, typed(typeEquals)},
+ {0x14, "!", opNot, proto("i:i"), 1, detDefault()},
+ {0x15, "len", opLen, proto("b:i"), 1, detDefault()},
+ {0x16, "itob", opItob, proto("i:b"), 1, detDefault()},
+ {0x17, "btoi", opBtoi, proto("b:i"), 1, detDefault()},
+ {0x18, "%", opModulo, proto("ii:i"), 1, detDefault()},
+ {0x19, "|", opBitOr, proto("ii:i"), 1, detDefault()},
+ {0x1a, "&", opBitAnd, proto("ii:i"), 1, detDefault()},
+ {0x1b, "^", opBitXor, proto("ii:i"), 1, detDefault()},
+ {0x1c, "~", opBitNot, proto("i:i"), 1, detDefault()},
+ {0x1d, "mulw", opMulw, proto("ii:ii"), 1, detDefault()},
+ {0x1e, "addw", opAddw, proto("ii:ii"), 2, detDefault()},
{0x1f, "divmodw", opDivModw, proto("iiii:iiii"), 4, costly(20)},
{0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntConstBlock, "uint ...", immInts)},
{0x21, "intc", opIntConstLoad, proto(":i"), 1, immediates("i").assembler(asmIntC)},
- {0x22, "intc_0", opIntConst0, proto(":i"), 1, opDefault()},
- {0x23, "intc_1", opIntConst1, proto(":i"), 1, opDefault()},
- {0x24, "intc_2", opIntConst2, proto(":i"), 1, opDefault()},
- {0x25, "intc_3", opIntConst3, proto(":i"), 1, opDefault()},
+ {0x22, "intc_0", opIntConst0, proto(":i"), 1, detDefault()},
+ {0x23, "intc_1", opIntConst1, proto(":i"), 1, detDefault()},
+ {0x24, "intc_2", opIntConst2, proto(":i"), 1, detDefault()},
+ {0x25, "intc_3", opIntConst3, proto(":i"), 1, detDefault()},
{0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteConstBlock, "bytes ...", immBytess)},
{0x27, "bytec", opByteConstLoad, proto(":b"), 1, immediates("i").assembler(asmByteC)},
- {0x28, "bytec_0", opByteConst0, proto(":b"), 1, opDefault()},
- {0x29, "bytec_1", opByteConst1, proto(":b"), 1, opDefault()},
- {0x2a, "bytec_2", opByteConst2, proto(":b"), 1, opDefault()},
- {0x2b, "bytec_3", opByteConst3, proto(":b"), 1, opDefault()},
+ {0x28, "bytec_0", opByteConst0, proto(":b"), 1, detDefault()},
+ {0x29, "bytec_1", opByteConst1, proto(":b"), 1, detDefault()},
+ {0x2a, "bytec_2", opByteConst2, proto(":b"), 1, detDefault()},
+ {0x2b, "bytec_3", opByteConst3, proto(":b"), 1, detDefault()},
{0x2c, "arg", opArg, proto(":b"), 1, immediates("n").only(modeSig).assembler(asmArg)},
{0x2d, "arg_0", opArg0, proto(":b"), 1, only(modeSig)},
{0x2e, "arg_1", opArg1, proto(":b"), 1, only(modeSig)},
@@ -446,8 +470,8 @@ var OpSpecs = []OpSpec{
{0x31, "txn", opTxn, proto(":a"), 1, field("f", &TxnScalarFields)},
{0x32, "global", opGlobal, proto(":a"), 1, field("f", &GlobalFields)},
{0x33, "gtxn", opGtxn, proto(":a"), 1, immediates("t", "f").field("f", &TxnScalarFields)},
- {0x34, "load", opLoad, proto(":a"), 1, stacky(typeLoad, "i")},
- {0x35, "store", opStore, proto("a:"), 1, stacky(typeStore, "i")},
+ {0x34, "load", opLoad, proto(":a"), 1, immediates("i").typed(typeLoad)},
+ {0x35, "store", opStore, proto("a:"), 1, immediates("i").typed(typeStore)},
{0x36, "txna", opTxna, proto(":a"), 2, immediates("f", "i").field("f", &TxnArrayFields)},
{0x37, "gtxna", opGtxna, proto(":a"), 2, immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Like gtxn, but gets txn index from stack, rather than immediate arg
@@ -461,41 +485,41 @@ var OpSpecs = []OpSpec{
{0x3d, "gaids", opGaids, proto("i:i"), 4, only(modeApp)},
// Like load/store, but scratch slot taken from TOS instead of immediate
- {0x3e, "loads", opLoads, proto("i:a"), 5, stacky(typeLoads)},
- {0x3f, "stores", opStores, proto("ia:"), 5, stacky(typeStores)},
-
- {0x40, "bnz", opBnz, proto("i:"), 1, opBranch()},
- {0x41, "bz", opBz, proto("i:"), 2, opBranch()},
- {0x42, "b", opB, proto(":"), 2, opBranch()},
- {0x43, "return", opReturn, proto("i:x"), 2, opDefault()},
- {0x44, "assert", opAssert, proto("i:"), 3, opDefault()},
- {0x48, "pop", opPop, proto("a:"), 1, opDefault()},
- {0x49, "dup", opDup, proto("a:aa", "A, A"), 1, stacky(typeDup)},
- {0x4a, "dup2", opDup2, proto("aa:aaaa", "A, B, A, B"), 2, stacky(typeDupTwo)},
- // There must be at least one thing on the stack for dig, but
- // it would be nice if we did better checking than that.
- {0x4b, "dig", opDig, proto("a:aa", "A, [N items]", "A, [N items], A"), 3, stacky(typeDig, "n")},
- {0x4c, "swap", opSwap, proto("aa:aa", "B, A"), 3, stacky(typeSwap)},
- {0x4d, "select", opSelect, proto("aai:a", "A or B"), 3, stacky(typeSelect)},
- {0x4e, "cover", opCover, proto("a:a", "[N items], A", "A, [N items]"), 5, stacky(typeCover, "n")},
- {0x4f, "uncover", opUncover, proto("a:a", "A, [N items]", "[N items], A"), 5, stacky(typeUncover, "n")},
+ {0x3e, "loads", opLoads, proto("i:a"), 5, typed(typeLoads)},
+ {0x3f, "stores", opStores, proto("ia:"), 5, typed(typeStores)},
+
+ {0x40, "bnz", opBnz, proto("i:"), 1, detBranch()},
+ {0x41, "bz", opBz, proto("i:"), 2, detBranch()},
+ {0x42, "b", opB, proto(":"), 2, detBranch()},
+ {0x43, "return", opReturn, proto("i:x"), 2, detDefault()},
+ {0x44, "assert", opAssert, proto("i:"), 3, detDefault()},
+ {0x45, "bury", opBury, proto("a:"), fpVersion, immediates("n").typed(typeBury)},
+ {0x46, "popn", opPopN, proto(":", "[N items]", ""), fpVersion, immediates("n").typed(typePopN).trust()},
+ {0x47, "dupn", opDupN, proto("a:", "", "A, [N copies of A]"), fpVersion, immediates("n").typed(typeDupN).trust()},
+ {0x48, "pop", opPop, proto("a:"), 1, detDefault()},
+ {0x49, "dup", opDup, proto("a:aa", "A, A"), 1, typed(typeDup)},
+ {0x4a, "dup2", opDup2, proto("aa:aaaa", "A, B, A, B"), 2, typed(typeDupTwo)},
+ {0x4b, "dig", opDig, proto("a:aa", "A, [N items]", "A, [N items], A"), 3, immediates("n").typed(typeDig)},
+ {0x4c, "swap", opSwap, proto("aa:aa", "B, A"), 3, typed(typeSwap)},
+ {0x4d, "select", opSelect, proto("aai:a", "A or B"), 3, typed(typeSelect)},
+ {0x4e, "cover", opCover, proto("a:a", "[N items], A", "A, [N items]"), 5, immediates("n").typed(typeCover)},
+ {0x4f, "uncover", opUncover, proto("a:a", "A, [N items]", "[N items], A"), 5, immediates("n").typed(typeUncover)},
// byteslice processing / StringOps
- {0x50, "concat", opConcat, proto("bb:b"), 2, opDefault()},
+ {0x50, "concat", opConcat, proto("bb:b"), 2, detDefault()},
{0x51, "substring", opSubstring, proto("b:b"), 2, immediates("s", "e").assembler(asmSubstring)},
- {0x52, "substring3", opSubstring3, proto("bii:b"), 2, opDefault()},
- {0x53, "getbit", opGetBit, proto("ai:i"), 3, opDefault()},
- {0x54, "setbit", opSetBit, proto("aii:a"), 3, stacky(typeSetBit)},
- {0x55, "getbyte", opGetByte, proto("bi:i"), 3, opDefault()},
- {0x56, "setbyte", opSetByte, proto("bii:b"), 3, opDefault()},
+ {0x52, "substring3", opSubstring3, proto("bii:b"), 2, detDefault()},
+ {0x53, "getbit", opGetBit, proto("ai:i"), 3, detDefault()},
+ {0x54, "setbit", opSetBit, proto("aii:a"), 3, typed(typeSetBit)},
+ {0x55, "getbyte", opGetByte, proto("bi:i"), 3, detDefault()},
+ {0x56, "setbyte", opSetByte, proto("bii:b"), 3, detDefault()},
{0x57, "extract", opExtract, proto("b:b"), 5, immediates("s", "l")},
- {0x58, "extract3", opExtract3, proto("bii:b"), 5, opDefault()},
- {0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, opDefault()},
- {0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, opDefault()},
- {0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, opDefault()},
+ {0x58, "extract3", opExtract3, proto("bii:b"), 5, detDefault()},
+ {0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, detDefault()},
+ {0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, detDefault()},
+ {0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, detDefault()},
{0x5c, "replace2", opReplace2, proto("bb:b"), 7, immediates("s")},
- {0x5d, "replace3", opReplace3, proto("bib:b"), 7, opDefault()},
-
+ {0x5d, "replace3", opReplace3, proto("bib:b"), 7, detDefault()},
{0x5e, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16, 0)},
{0x5f, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes).costByLength(25, 2, 7, 1)},
@@ -532,19 +556,24 @@ var OpSpecs = []OpSpec{
{0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:i"), 7, costly(1900)},
// "Function oriented"
- {0x88, "callsub", opCallSub, proto(":"), 4, opBranch()},
- {0x89, "retsub", opRetSub, proto(":"), 4, opDefault()},
- // Leave a little room for indirect function calls, or similar
+ {0x88, "callsub", opCallSub, proto(":"), 4, detBranch()},
+ {0x89, "retsub", opRetSub, proto(":"), 4, detDefault().trust()},
+ // protoByte is a named constant because opCallSub needs to know it.
+ {protoByte, "proto", opProto, proto(":"), fpVersion, immediates("a", "r").typed(typeProto)},
+ {0x8b, "frame_dig", opFrameDig, proto(":a"), fpVersion, immKinded(immInt8, "i").typed(typeFrameDig)},
+ {0x8c, "frame_bury", opFrameBury, proto("a:"), fpVersion, immKinded(immInt8, "i").typed(typeFrameBury)},
+ {0x8d, "switch", opSwitch, proto("i:"), 8, detSwitch()},
+ // 0x8e will likely be a switch on pairs of values/targets, called `match`
// More math
- {0x90, "shl", opShiftLeft, proto("ii:i"), 4, opDefault()},
- {0x91, "shr", opShiftRight, proto("ii:i"), 4, opDefault()},
+ {0x90, "shl", opShiftLeft, proto("ii:i"), 4, detDefault()},
+ {0x91, "shr", opShiftRight, proto("ii:i"), 4, detDefault()},
{0x92, "sqrt", opSqrt, proto("i:i"), 4, costly(4)},
- {0x93, "bitlen", opBitLen, proto("a:i"), 4, opDefault()},
- {0x94, "exp", opExp, proto("ii:i"), 4, opDefault()},
+ {0x93, "bitlen", opBitLen, proto("a:i"), 4, detDefault()},
+ {0x94, "exp", opExp, proto("ii:i"), 4, detDefault()},
{0x95, "expw", opExpw, proto("ii:ii"), 4, costly(10)},
{0x96, "bsqrt", opBytesSqrt, proto("b:b"), 6, costly(40)},
- {0x97, "divw", opDivw, proto("iii:i"), 6, opDefault()},
+ {0x97, "divw", opDivw, proto("iii:i"), 6, detDefault()},
{0x98, "sha3_256", opSHA3_256, proto("b:b"), 7, costly(130)},
/* Will end up following keccak256 -
{0x98, "sha3_256", opSHA3_256, proto("b:b"), unlimitedStorage, costByLength(58, 4, 8)},},
@@ -553,30 +582,29 @@ var OpSpecs = []OpSpec{
{0x99, "bn256_add", opBn256Add, proto("bb:b"), pairingVersion, costly(70)},
{0x9a, "bn256_scalar_mul", opBn256ScalarMul, proto("bb:b"), pairingVersion, costly(970)},
{0x9b, "bn256_pairing", opBn256Pairing, proto("bb:i"), pairingVersion, costly(8700)},
- // leave room here for eip-2537 style opcodes
// Byteslice math.
{0xa0, "b+", opBytesPlus, proto("bb:b"), 4, costly(10)},
{0xa1, "b-", opBytesMinus, proto("bb:b"), 4, costly(10)},
{0xa2, "b/", opBytesDiv, proto("bb:b"), 4, costly(20)},
{0xa3, "b*", opBytesMul, proto("bb:b"), 4, costly(20)},
- {0xa4, "b<", opBytesLt, proto("bb:i"), 4, opDefault()},
- {0xa5, "b>", opBytesGt, proto("bb:i"), 4, opDefault()},
- {0xa6, "b<=", opBytesLe, proto("bb:i"), 4, opDefault()},
- {0xa7, "b>=", opBytesGe, proto("bb:i"), 4, opDefault()},
- {0xa8, "b==", opBytesEq, proto("bb:i"), 4, opDefault()},
- {0xa9, "b!=", opBytesNeq, proto("bb:i"), 4, opDefault()},
+ {0xa4, "b<", opBytesLt, proto("bb:i"), 4, detDefault()},
+ {0xa5, "b>", opBytesGt, proto("bb:i"), 4, detDefault()},
+ {0xa6, "b<=", opBytesLe, proto("bb:i"), 4, detDefault()},
+ {0xa7, "b>=", opBytesGe, proto("bb:i"), 4, detDefault()},
+ {0xa8, "b==", opBytesEq, proto("bb:i"), 4, detDefault()},
+ {0xa9, "b!=", opBytesNeq, proto("bb:i"), 4, detDefault()},
{0xaa, "b%", opBytesModulo, proto("bb:b"), 4, costly(20)},
{0xab, "b|", opBytesBitOr, proto("bb:b"), 4, costly(6)},
{0xac, "b&", opBytesBitAnd, proto("bb:b"), 4, costly(6)},
{0xad, "b^", opBytesBitXor, proto("bb:b"), 4, costly(6)},
{0xae, "b~", opBytesBitNot, proto("b:b"), 4, costly(4)},
- {0xaf, "bzero", opBytesZero, proto("i:b"), 4, opDefault()},
+ {0xaf, "bzero", opBytesZero, proto("i:b"), 4, detDefault()},
// AVM "effects"
{0xb0, "log", opLog, proto("b:"), 5, only(modeApp)},
{0xb1, "itxn_begin", opTxBegin, proto(":"), 5, only(modeApp)},
- {0xb2, "itxn_field", opItxnField, proto("a:"), 5, stacky(typeTxField, "f").field("f", &TxnFields).only(modeApp).assembler(asmItxnField)},
+ {0xb2, "itxn_field", opItxnField, proto("a:"), 5, immediates("f").typed(typeTxField).field("f", &TxnFields).only(modeApp).assembler(asmItxnField)},
{0xb3, "itxn_submit", opItxnSubmit, proto(":"), 5, only(modeApp)},
{0xb4, "itxn", opItxn, proto(":a"), 5, field("f", &TxnScalarFields).only(modeApp).assembler(asmItxn)},
{0xb5, "itxna", opItxna, proto(":a"), 5, immediates("f", "i").field("f", &TxnArrayFields).only(modeApp)},
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index 127f12961..7a299a962 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -64,7 +64,7 @@
},
{
"name": "keyword.control.teal",
- "match": "^(assert|b|bnz|bz|callsub|cover|dig|dup|dup2|err|pop|retsub|return|select|swap|uncover)\\b"
+ "match": "^(assert|b|bnz|bury|bz|callsub|cover|dig|dup|dup2|dupn|err|frame_bury|frame_dig|pop|popn|proto|retsub|return|select|swap|switch|uncover)\\b"
},
{
"name": "keyword.other.teal",
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 1d947d31a..4d0bd2717 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -99,62 +99,43 @@ func (g *GroupContext) Equal(other *GroupContext) bool {
g.minAvmVersion == other.minAvmVersion
}
-// Txn verifies a SignedTxn as being signed and having no obviously inconsistent data.
+// txnBatchPrep verifies a SignedTxn having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
-func Txn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
- batchVerifier := crypto.MakeBatchVerifier()
-
- if err := TxnBatchVerify(s, txnIdx, groupCtx, batchVerifier); err != nil {
- return err
- }
-
- // this case is used for comapact certificate where no signature is supplied
- if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
- return nil
- }
- if err := batchVerifier.Verify(); err != nil {
- return err
- }
- return nil
-}
-
-// TxnBatchVerify verifies a SignedTxn having no obviously inconsistent data.
-// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
-// it is the caller responsibility to call batchVerifier.verify()
-func TxnBatchVerify(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, verifier *crypto.BatchVerifier) error {
+// It is the caller's responsibility to call batchVerifier.Verify()
+func txnBatchPrep(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, verifier *crypto.BatchVerifier) error {
if !groupCtx.consensusParams.SupportRekeying && (s.AuthAddr != basics.Address{}) {
- return errors.New("nonempty AuthAddr but rekeying not supported")
+ return errors.New("nonempty AuthAddr but rekeying is not supported")
}
if err := s.Txn.WellFormed(groupCtx.specAddrs, groupCtx.consensusParams); err != nil {
return err
}
- return stxnVerifyCore(s, txnIdx, groupCtx, verifier)
+ return stxnCoreChecks(s, txnIdx, groupCtx, verifier)
}
// TxnGroup verifies a []SignedTxn as being signed and having no obviously inconsistent data.
func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache, ledger logic.LedgerForSignature) (groupCtx *GroupContext, err error) {
batchVerifier := crypto.MakeBatchVerifier()
- if groupCtx, err = TxnGroupBatchVerify(stxs, contextHdr, cache, ledger, batchVerifier); err != nil {
+ if groupCtx, err = txnGroupBatchPrep(stxs, contextHdr, ledger, batchVerifier); err != nil {
return nil, err
}
- if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
- return groupCtx, nil
- }
-
if err := batchVerifier.Verify(); err != nil {
return nil, err
}
+ if cache != nil {
+ cache.Add(stxs, groupCtx)
+ }
+
return
}
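The shape of the refactor: exported entry points now own the single Verify call, while the unexported *Prep functions only enqueue signatures. A sketch of the shared pattern (essentially what PaysetGroups does further down, minus caching and error classification; not code from this patch):

    func verifyAllGroups(groups [][]transactions.SignedTxn, hdr bookkeeping.BlockHeader, ledger logic.LedgerForSignature) error {
        bv := crypto.MakeBatchVerifierWithHint(len(groups))
        for _, grp := range groups {
            // enqueue only; cheap structural checks can reject early
            if _, err := txnGroupBatchPrep(grp, hdr, ledger, bv); err != nil {
                return err
            }
        }
        // one batched pass over every enqueued signature
        return bv.Verify()
    }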
-// TxnGroupBatchVerify verifies a []SignedTxn having no obviously inconsistent data.
-// it is the caller responsibility to call batchVerifier.verify()
-func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache, ledger logic.LedgerForSignature, verifier *crypto.BatchVerifier) (groupCtx *GroupContext, err error) {
+// txnGroupBatchPrep verifies a []SignedTxn having no obviously inconsistent data.
+// It is the caller's responsibility to call batchVerifier.Verify()
+func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, ledger logic.LedgerForSignature, verifier *crypto.BatchVerifier) (groupCtx *GroupContext, err error) {
groupCtx, err = PrepareGroupContext(stxs, contextHdr, ledger)
if err != nil {
return nil, err
@@ -163,7 +144,7 @@ func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.B
minFeeCount := uint64(0)
feesPaid := uint64(0)
for i, stxn := range stxs {
- err = TxnBatchVerify(&stxn, i, groupCtx, verifier)
+ err = txnBatchPrep(&stxn, i, groupCtx, verifier)
if err != nil {
err = fmt.Errorf("transaction %+v invalid : %w", stxn, err)
return
@@ -187,13 +168,10 @@ func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.B
return
}
- if cache != nil {
- cache.Add(stxs, groupCtx)
- }
return
}
-func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
+func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
numSigs := 0
hasSig := false
hasMsig := false
@@ -230,16 +208,13 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
return nil
}
if hasMsig {
- if ok, _ := crypto.MultisigBatchVerify(s.Txn,
- crypto.Digest(s.Authorizer()),
- s.Msig,
- batchVerifier); ok {
- return nil
+ if err := crypto.MultisigBatchPrep(s.Txn, crypto.Digest(s.Authorizer()), s.Msig, batchVerifier); err != nil {
+ return fmt.Errorf("multisig validation failed: %w", err)
}
- return errors.New("multisig validation failed")
+ return nil
}
if hasLogicSig {
- return logicSigBatchVerify(s, txnIdx, groupCtx)
+ return logicSigVerify(s, txnIdx, groupCtx)
}
return errors.New("has one mystery sig. WAT?")
}
@@ -249,25 +224,16 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
batchVerifier := crypto.MakeBatchVerifier()
- if err := LogicSigSanityCheckBatchVerify(txn, groupIndex, groupCtx, batchVerifier); err != nil {
+ if err := logicSigSanityCheckBatchPrep(txn, groupIndex, groupCtx, batchVerifier); err != nil {
return err
}
-
- // in case of contract account the signature len might 0. that's ok
- if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
- return nil
- }
-
- if err := batchVerifier.Verify(); err != nil {
- return err
- }
- return nil
+ return batchVerifier.Verify()
}
-// LogicSigSanityCheckBatchVerify checks that the signature is valid and that the program is basically well formed.
+// logicSigSanityCheckBatchPrep checks that the signature is valid and that the program is basically well formed.
// It does not evaluate the logic.
-// it is the caller responsibility to call batchVerifier.verify()
-func LogicSigSanityCheckBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
+// It is the caller's responsibility to call batchVerifier.Verify()
+func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
lsig := txn.Lsig
if groupCtx.consensusParams.LogicSigVersion == 0 {
@@ -329,16 +295,15 @@ func LogicSigSanityCheckBatchVerify(txn *transactions.SignedTxn, groupIndex int,
batchVerifier.EnqueueSignature(crypto.PublicKey(txn.Authorizer()), &program, lsig.Sig)
} else {
program := logic.Program(lsig.Logic)
- if ok, _ := crypto.MultisigBatchVerify(&program, crypto.Digest(txn.Authorizer()), lsig.Msig, batchVerifier); !ok {
- return errors.New("logic multisig validation failed")
+ if err := crypto.MultisigBatchPrep(&program, crypto.Digest(txn.Authorizer()), lsig.Msig, batchVerifier); err != nil {
+ return fmt.Errorf("logic multisig validation failed: %w", err)
}
}
return nil
}
-// logicSigBatchVerify checks that the signature is valid, executing the program.
-// it is the caller responsibility to call batchVerifier.verify()
-func logicSigBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
+// logicSigVerify checks that the signature is valid, executing the program.
+func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
err := LogicSigSanityCheck(txn, groupIndex, groupCtx)
if err != nil {
return err
@@ -410,17 +375,15 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea
batchVerifier := crypto.MakeBatchVerifierWithHint(len(payset))
for i, signTxnsGrp := range txnGroups {
- groupCtxs[i], grpErr = TxnGroupBatchVerify(signTxnsGrp, blkHeader, nil, ledger, batchVerifier)
+ groupCtxs[i], grpErr = txnGroupBatchPrep(signTxnsGrp, blkHeader, ledger, batchVerifier)
// abort only if it's a non-cache error.
if grpErr != nil {
return grpErr
}
}
- if batchVerifier.GetNumberOfEnqueuedSignatures() != 0 {
- verifyErr := batchVerifier.Verify()
- if verifyErr != nil {
- return verifyErr
- }
+ verifyErr := batchVerifier.Verify()
+ if verifyErr != nil {
+ return verifyErr
}
cache.AddPayset(txnGroups, groupCtxs)
return nil
diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go
index 399835211..ea3f05b87 100644
--- a/data/transactions/verify/txn_test.go
+++ b/data/transactions/verify/txn_test.go
@@ -51,6 +51,15 @@ var spec = transactions.SpecialAddresses{
RewardsPool: poolAddr,
}
+func verifyTxn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
+ batchVerifier := crypto.MakeBatchVerifier()
+
+ if err := txnBatchPrep(s, txnIdx, groupCtx, batchVerifier); err != nil {
+ return err
+ }
+ return batchVerifier.Verify()
+}
+
func keypair() *crypto.SignatureSecrets {
var seed crypto.Seed
crypto.RandBytes(seed[:])
@@ -117,14 +126,14 @@ func TestSignedPayment(t *testing.T) {
groupCtx, err := PrepareGroupContext(stxns, blockHeader, nil)
require.NoError(t, err)
require.NoError(t, payment.WellFormed(spec, proto), "generateTestObjects generated an invalid payment")
- require.NoError(t, Txn(&stxn, 0, groupCtx), "generateTestObjects generated a bad signedtxn")
+ require.NoError(t, verifyTxn(&stxn, 0, groupCtx), "generateTestObjects generated a bad signedtxn")
stxn2 := payment.Sign(secret)
require.Equal(t, stxn2.Sig, stxn.Sig, "got two different signatures for the same transaction (our signing function is deterministic)")
stxn2.MessUpSigForTesting()
require.Equal(t, stxn.ID(), stxn2.ID(), "changing sig caused txid to change")
- require.Error(t, Txn(&stxn2, 0, groupCtx), "verify succeeded with bad sig")
+ require.Error(t, verifyTxn(&stxn2, 0, groupCtx), "verify succeeded with bad sig")
require.True(t, crypto.SignatureVerifier(addr).Verify(payment, stxn.Sig), "signature on the transaction is not the signature of the hash of the transaction under the spender's key")
}
@@ -137,7 +146,7 @@ func TestTxnValidationEncodeDecode(t *testing.T) {
for _, txn := range signed {
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader, nil)
require.NoError(t, err)
- if Txn(&txn, 0, groupCtx) != nil {
+ if verifyTxn(&txn, 0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
}
@@ -145,7 +154,7 @@ func TestTxnValidationEncodeDecode(t *testing.T) {
var signedTx transactions.SignedTxn
protocol.Decode(x, &signedTx)
- if Txn(&signedTx, 0, groupCtx) != nil {
+ if verifyTxn(&signedTx, 0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
}
}
@@ -159,14 +168,14 @@ func TestTxnValidationEmptySig(t *testing.T) {
for _, txn := range signed {
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader, nil)
require.NoError(t, err)
- if Txn(&txn, 0, groupCtx) != nil {
+ if verifyTxn(&txn, 0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
}
txn.Sig = crypto.Signature{}
txn.Msig = crypto.MultisigSig{}
txn.Lsig = transactions.LogicSig{}
- if Txn(&txn, 0, groupCtx) == nil {
+ if verifyTxn(&txn, 0, groupCtx) == nil {
t.Errorf("transaction %#v verified without sig", txn)
}
}
@@ -205,13 +214,13 @@ func TestTxnValidationStateProof(t *testing.T) {
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{stxn}, blockHeader, nil)
require.NoError(t, err)
- err = Txn(&stxn, 0, groupCtx)
+ err = verifyTxn(&stxn, 0, groupCtx)
require.NoError(t, err, "state proof txn %#v did not verify", stxn)
stxn2 := stxn
stxn2.Txn.Type = protocol.PaymentTx
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
- err = Txn(&stxn2, 0, groupCtx)
+ err = verifyTxn(&stxn2, 0, groupCtx)
require.Error(t, err, "payment txn %#v verified from StateProofSender", stxn2)
secret := keypair()
@@ -219,28 +228,28 @@ func TestTxnValidationStateProof(t *testing.T) {
stxn2.Txn.Header.Sender = basics.Address(secret.SignatureVerifier)
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
stxn2 = stxn2.Txn.Sign(secret)
- err = Txn(&stxn2, 0, groupCtx)
+ err = verifyTxn(&stxn2, 0, groupCtx)
require.Error(t, err, "state proof txn %#v verified from non-StateProofSender", stxn2)
// state proof txns are not allowed to have non-zero values for many fields
stxn2 = stxn
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
- err = Txn(&stxn2, 0, groupCtx)
+ err = verifyTxn(&stxn2, 0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.Header.Note = []byte{'A'}
- err = Txn(&stxn2, 0, groupCtx)
+ err = verifyTxn(&stxn2, 0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.Lease[0] = 1
- err = Txn(&stxn2, 0, groupCtx)
+ err = verifyTxn(&stxn2, 0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.RekeyTo = basics.Address(secret.SignatureVerifier)
- err = Txn(&stxn2, 0, groupCtx)
+ err = verifyTxn(&stxn2, 0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
}
@@ -258,7 +267,7 @@ func TestDecodeNil(t *testing.T) {
// This used to panic when run on a zero value of SignedTxn.
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{st}, blockHeader, nil)
require.NoError(t, err)
- Txn(&st, 0, groupCtx)
+ verifyTxn(&st, 0, groupCtx)
}
}
@@ -425,9 +434,84 @@ func BenchmarkTxn(b *testing.B) {
groupCtx, err := PrepareGroupContext(txnGroup, blk.BlockHeader, nil)
require.NoError(b, err)
for i, txn := range txnGroup {
- err := Txn(&txn, i, groupCtx)
+ err := verifyTxn(&txn, i, groupCtx)
require.NoError(b, err)
}
}
b.StopTimer()
}
+
+// TestTxnGroupCacheUpdate uses TxnGroup to verify txns and add them to the
+// cache, then checks that only the valid txns are verified and added to the
+// cache.
+func TestTxnGroupCacheUpdate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, signedTxn, secrets, addrs := generateTestObjects(100, 20, 50)
+ blkHdr := bookkeeping.BlockHeader{
+ Round: 50,
+ GenesisHash: crypto.Hash([]byte{1, 2, 3, 4, 5}),
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ RewardsState: bookkeeping.RewardsState{
+ FeeSink: feeSink,
+ RewardsPool: poolAddr,
+ },
+ }
+
+ txnGroups := generateTransactionGroups(signedTxn, secrets, addrs)
+ cache := MakeVerifiedTransactionCache(1000)
+
+ // break the signature and see if it fails.
+ txnGroups[0][0].Sig[0] = txnGroups[0][0].Sig[0] + 1
+
+ _, err := TxnGroup(txnGroups[0], blkHdr, cache, nil)
+ require.Error(t, err)
+
+ // The txns should not be in the cache
+ unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups[:1], spec, protocol.ConsensusCurrentVersion)
+ require.Len(t, unverifiedGroups, 1)
+
+ unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion)
+ require.Len(t, unverifiedGroups, 2)
+
+ _, err = TxnGroup(txnGroups[1], blkHdr, cache, nil)
+ require.NoError(t, err)
+
+ // Only the second txn should be in the cache
+ unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion)
+ require.Len(t, unverifiedGroups, 1)
+
+ // Fix the signature
+ txnGroups[0][0].Sig[0] = txnGroups[0][0].Sig[0] - 1
+
+ _, err = TxnGroup(txnGroups[0], blkHdr, cache, nil)
+ require.NoError(t, err)
+
+ // Both transactions should be in the cache
+ unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion)
+ require.Len(t, unverifiedGroups, 0)
+
+ // Break a random signature
+ txgIdx := rand.Intn(len(txnGroups))
+ txIdx := rand.Intn(len(txnGroups[txgIdx]))
+ txnGroups[txgIdx][txIdx].Sig[0] = txnGroups[0][0].Sig[0] + 1
+
+ numFailed := 0
+
+ // Add them to the cache by verifying them
+ for _, txng := range txnGroups {
+ _, err = TxnGroup(txng, blkHdr, cache, nil)
+ if err != nil {
+ numFailed++
+ }
+ }
+ require.Equal(t, 1, numFailed)
+
+ // Only one transaction should not be in the cache
+ unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
+ require.Len(t, unverifiedGroups, 1)
+
+ require.Equal(t, unverifiedGroups[0], txnGroups[txgIdx])
+}
diff --git a/data/transactions/verify/verifiedTxnCache.go b/data/transactions/verify/verifiedTxnCache.go
index b41993be9..82f0e9772 100644
--- a/data/transactions/verify/verifiedTxnCache.go
+++ b/data/transactions/verify/verifiedTxnCache.go
@@ -60,8 +60,8 @@ type VerifiedTransactionCache interface {
Add(txgroup []transactions.SignedTxn, groupCtx *GroupContext)
// AddPayset works in a similar way to Add, but is intended for adding an array of transaction groups, along with their corresponding contexts.
AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*GroupContext) error
- // GetUnverifiedTranscationGroups compares the provided payset against the currently cached transactions and figure which transaction groups aren't fully cached.
- GetUnverifiedTranscationGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn
+ // GetUnverifiedTransactionGroups compares the provided payset against the currently cached transactions and figures out which transaction groups aren't fully cached.
+ GetUnverifiedTransactionGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn
// UpdatePinned replaces the pinned entries with the one provided in the pinnedTxns map. This is typically expected to be a subset of the
// already-pinned transactions. If a transaction is not currently pinned, and it's can't be found in the cache, a errMissingPinnedEntry error would be generated.
UpdatePinned(pinnedTxns map[transactions.Txid]transactions.SignedTxn) error
@@ -115,8 +115,8 @@ func (v *verifiedTransactionCache) AddPayset(txgroup [][]transactions.SignedTxn,
return nil
}
-// GetUnverifiedTranscationGroups compares the provided payset against the currently cached transactions and figure which transaction groups aren't fully cached.
-func (v *verifiedTransactionCache) GetUnverifiedTranscationGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
+// GetUnverifiedTransactionGroups compares the provided payset against the currently cached transactions and figures out which transaction groups aren't fully cached.
+func (v *verifiedTransactionCache) GetUnverifiedTransactionGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
v.bucketsLock.Lock()
defer v.bucketsLock.Unlock()
groupCtx := &GroupContext{
@@ -272,7 +272,7 @@ func (v *mockedCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []
return nil
}
-func (v *mockedCache) GetUnverifiedTranscationGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
+func (v *mockedCache) GetUnverifiedTransactionGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
if v.alwaysVerified {
return nil
}
diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go
index 35d958e35..e3001db67 100644
--- a/data/transactions/verify/verifiedTxnCache_test.go
+++ b/data/transactions/verify/verifiedTxnCache_test.go
@@ -76,7 +76,7 @@ func TestBucketCycling(t *testing.T) {
require.Equal(t, 1, len(impl.buckets[0]))
}
-func TestGetUnverifiedTranscationGroups50(t *testing.T) {
+func TestGetUnverifiedTransactionGroups50(t *testing.T) {
partitiontest.PartitionTest(t)
size := 300
@@ -97,11 +97,11 @@ func TestGetUnverifiedTranscationGroups50(t *testing.T) {
}
}
- unverifiedGroups := impl.GetUnverifiedTranscationGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
+ unverifiedGroups := impl.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
require.Equal(t, len(expectedUnverifiedGroups), len(unverifiedGroups))
}
-func BenchmarkGetUnverifiedTranscationGroups50(b *testing.B) {
+func BenchmarkGetUnverifiedTransactionGroups50(b *testing.B) {
if b.N < 20000 {
b.N = 20000
}
@@ -125,7 +125,7 @@ func BenchmarkGetUnverifiedTranscationGroups50(b *testing.B) {
startTime := time.Now()
measuringMultipler := 1000
for i := 0; i < measuringMultipler; i++ {
- impl.GetUnverifiedTranscationGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion)
+ impl.GetUnverifiedTransactionGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion)
}
duration := time.Now().Sub(startTime)
// calculate time per 10K verified entries:
diff --git a/data/txHandler.go b/data/txHandler.go
index 46248b4ed..cd4c25c8e 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -219,7 +219,7 @@ func (handler *TxHandler) asyncVerifySignature(arg interface{}) interface{} {
}
func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) network.OutgoingMessage {
- dec := protocol.NewDecoderBytes(rawmsg.Data)
+ dec := protocol.NewMsgpDecoderBytes(rawmsg.Data)
ntx := 0
unverifiedTxGroup := make([]transactions.SignedTxn, 1)
for {
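NewMsgpDecoderBytes swaps the generic codec decoder for msgp-generated decoding on this hot ingest path; the consumption loop keeps the same shape (compare the two decoder benchmarks at the end of txHandler_test.go below). A fragment of the pattern, assuming a payload of concatenated msgpack-encoded SignedTxns:

    dec := protocol.NewMsgpDecoderBytes(rawmsg.Data)
    for {
        var stx transactions.SignedTxn
        err := dec.Decode(&stx)
        if err == io.EOF {
            break // payload fully consumed
        }
        if err != nil {
            break // malformed payload: stop and reject the message
        }
        // append stx to the unverified group ...
    }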
@@ -265,11 +265,13 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
// Note that this also checks the consistency of the transaction's group hash,
// which is required for safe transaction signature caching behavior.
func (handler *TxHandler) checkAlreadyCommitted(tx *txBacklogMsg) (processingDone bool) {
- txids := make([]transactions.Txid, len(tx.unverifiedTxGroup))
- for i := range tx.unverifiedTxGroup {
- txids[i] = tx.unverifiedTxGroup[i].ID()
+ if logging.Base().IsLevelEnabled(logging.Debug) {
+ txids := make([]transactions.Txid, len(tx.unverifiedTxGroup))
+ for i := range tx.unverifiedTxGroup {
+ txids[i] = tx.unverifiedTxGroup[i].ID()
+ }
+ logging.Base().Debugf("got a tx group with IDs %v", txids)
}
- logging.Base().Debugf("got a tx group with IDs %v", txids)
// do a quick test to check that this transaction could potentially be committed, to reject dup pending transactions
err := handler.txPool.Test(tx.unverifiedTxGroup)
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 653cd51e9..14a5495eb 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -18,6 +18,7 @@ package data
import (
"fmt"
+ "io"
"math/rand"
"testing"
"time"
@@ -31,17 +32,18 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
)
-func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
- b.StopTimer()
- b.ResetTimer()
- const numRounds = 10
+func BenchmarkTxHandlerProcessing(b *testing.B) {
const numUsers = 100
log := logging.TestingLog(b)
+ log.SetLevel(logging.Warn)
secrets := make([]*crypto.SignatureSecrets, numUsers)
addresses := make([]basics.Address, numUsers)
@@ -73,17 +75,20 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
l := ledger
- cfg.TxPoolSize = 20000
+ cfg.TxPoolSize = 75000
cfg.EnableProcessBlockStats = false
tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
- signedTransactions := make([]transactions.SignedTxn, 0, b.N)
- for i := 0; i < b.N/numUsers; i++ {
- for u := 0; u < numUsers; u++ {
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ txHandler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
+
+ makeTxns := func(N int) [][]transactions.SignedTxn {
+ ret := make([][]transactions.SignedTxn, 0, N)
+ for u := 0; u < N; u++ {
// generate transactions
tx := transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
- Sender: addresses[u],
+ Sender: addresses[u%numUsers],
Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
FirstValid: 0,
LastValid: basics.Round(proto.MaxTxnLife),
@@ -94,18 +99,51 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)},
},
}
- signedTx := tx.Sign(secrets[u])
- signedTransactions = append(signedTransactions, signedTx)
+ signedTx := tx.Sign(secrets[u%numUsers])
+ ret = append(ret, []transactions.SignedTxn{signedTx})
}
+ return ret
}
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- txHandler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
- b.StartTimer()
- for _, signedTxn := range signedTransactions {
- txHandler.processDecoded([]transactions.SignedTxn{signedTxn})
- }
+
+ b.Run("processDecoded", func(b *testing.B) {
+ signedTransactionGroups := makeTxns(b.N)
+ b.ResetTimer()
+ for i := range signedTransactionGroups {
+ txHandler.processDecoded(signedTransactionGroups[i])
+ }
+ })
+ b.Run("verify.TxnGroup", func(b *testing.B) {
+ signedTransactionGroups := makeTxns(b.N)
+ b.ResetTimer()
+ // make a header including only the fields needed by PrepareGroupContext
+ hdr := bookkeeping.BlockHeader{}
+ hdr.FeeSink = basics.Address{}
+ hdr.RewardsPool = basics.Address{}
+ hdr.CurrentProtocol = protocol.ConsensusCurrentVersion
+ vtc := vtCache{}
+ b.Logf("verifying %d signedTransactionGroups", len(signedTransactionGroups))
+ b.ResetTimer()
+ for i := range signedTransactionGroups {
+ verify.TxnGroup(signedTransactionGroups[i], hdr, vtc, l)
+ }
+ })
}
+// vtCache is a noop VerifiedTransactionCache
+type vtCache struct{}
+
+func (vtCache) Add(txgroup []transactions.SignedTxn, groupCtx *verify.GroupContext) {}
+func (vtCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*verify.GroupContext) error {
+ return nil
+}
+func (vtCache) GetUnverifiedTransactionGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn {
+ return nil
+}
+func (vtCache) UpdatePinned(pinnedTxns map[transactions.Txid]transactions.SignedTxn) error {
+ return nil
+}
+func (vtCache) Pin(txgroup []transactions.SignedTxn) error { return nil }
+
func BenchmarkTimeAfter(b *testing.B) {
b.StopTimer()
b.ResetTimer()
@@ -121,3 +159,92 @@ func BenchmarkTimeAfter(b *testing.B) {
}
}
}
+
+func makeRandomTransactions(num int) ([]transactions.SignedTxn, []byte) {
+ stxns := make([]transactions.SignedTxn, num)
+ result := make([]byte, 0, num*200)
+ for i := 0; i < num; i++ {
+ var sig crypto.Signature
+ crypto.RandBytes(sig[:])
+ var addr basics.Address
+ crypto.RandBytes(addr[:])
+ stxns[i] = transactions.SignedTxn{
+ Sig: sig,
+ AuthAddr: addr,
+ Txn: transactions.Transaction{
+ Header: transactions.Header{
+ Sender: addr,
+ Fee: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ Note: sig[:],
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addr,
+ Amount: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ },
+ },
+ }
+
+ d2 := protocol.Encode(&stxns[i])
+ result = append(result, d2...)
+ }
+ return stxns, result
+}
+
+func TestTxHandlerProcessIncomingTxn(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const numTxns = 11
+ handler := TxHandler{
+ backlogQueue: make(chan *txBacklogMsg, 1),
+ }
+ stxns, blob := makeRandomTransactions(numTxns)
+ action := handler.processIncomingTxn(network.IncomingMessage{Data: blob})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+
+ require.Equal(t, 1, len(handler.backlogQueue))
+ msg := <-handler.backlogQueue
+ require.Equal(t, numTxns, len(msg.unverifiedTxGroup))
+ for i := 0; i < numTxns; i++ {
+ require.Equal(t, stxns[i], msg.unverifiedTxGroup[i])
+ }
+}
+
+const benchTxnNum = 25_000
+
+func BenchmarkTxHandlerDecoder(b *testing.B) {
+ _, blob := makeRandomTransactions(benchTxnNum)
+ var err error
+ stxns := make([]transactions.SignedTxn, benchTxnNum+1)
+ for i := 0; i < b.N; i++ {
+ dec := protocol.NewDecoderBytes(blob)
+ var idx int
+ for {
+ err = dec.Decode(&stxns[idx])
+ if err == io.EOF {
+ break
+ }
+ require.NoError(b, err)
+ idx++
+ }
+ require.Equal(b, benchTxnNum, idx)
+ }
+}
+
+func BenchmarkTxHandlerDecoderMsgp(b *testing.B) {
+ _, blob := makeRandomTransactions(benchTxnNum)
+ var err error
+ stxns := make([]transactions.SignedTxn, benchTxnNum+1)
+ for i := 0; i < b.N; i++ {
+ dec := protocol.NewMsgpDecoderBytes(blob)
+ var idx int
+ for {
+ err = dec.Decode(&stxns[idx])
+ if err == io.EOF {
+ break
+ }
+ require.NoError(b, err)
+ idx++
+ }
+ require.Equal(b, benchTxnNum, idx)
+ }
+}
diff --git a/gen/generate.go b/gen/generate.go
index 804e893c2..15eb09103 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -19,7 +19,6 @@ package gen
import (
"fmt"
"io"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -374,7 +373,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
}
jsonData := protocol.EncodeJSON(g)
- err = ioutil.WriteFile(filepath.Join(outDir, config.GenesisJSONFile), append(jsonData, '\n'), 0666)
+ err = os.WriteFile(filepath.Join(outDir, config.GenesisJSONFile), append(jsonData, '\n'), 0666)
if (verbose) && (rootKeyCreated > 0 || partKeyCreated > 0) {
fmt.Printf("Created %d new rootkeys and %d new partkeys in %s.\n", rootKeyCreated, partKeyCreated, time.Since(createStart))
diff --git a/go.mod b/go.mod
index dd46477e8..f20b25dc6 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/algorand/go-algorand
go 1.17
require (
+ github.com/algorand/avm-abi v0.1.0
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
github.com/algorand/go-codec/codec v1.1.8
github.com/algorand/go-deadlock v0.2.2
@@ -12,7 +13,6 @@ require (
github.com/algorand/oapi-codegen v1.3.7
github.com/algorand/websocket v1.4.5
github.com/aws/aws-sdk-go v1.16.5
- github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
github.com/consensys/gnark-crypto v0.7.0
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
github.com/dchest/siphash v1.2.1
@@ -39,6 +39,7 @@ require (
)
require (
+ github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e // indirect
github.com/cpuguy83/go-md2man v1.0.8 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index 0537d1101..58fbbdb98 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+github.com/algorand/avm-abi v0.1.0 h1:znZFQXpSUVYz37vXbaH5OZG2VK4snTyXwnc/tV9CVr4=
+github.com/algorand/avm-abi v0.1.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
diff --git a/installer/genesis/alphanet/genesis.json b/installer/genesis/alphanet/genesis.json
new file mode 100644
index 000000000..b3944e7af
--- /dev/null
+++ b/installer/genesis/alphanet/genesis.json
@@ -0,0 +1,313 @@
+{
+ "alloc": [
+ {
+ "addr": "7777777777777777777777777777777777777777777777777774MSJUVU",
+ "comment": "RewardsPool",
+ "state": {
+ "algo": 125000000000000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "comment": "FeeSink",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "NXD653KPZRLYFZKUWNYVZUDUBMB5NWGRZYSSMNOAR2GKNR4WE4D6JJ6SDQ",
+ "comment": "Wallet1",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "3s5/mxcllqsE0KabL4rzHC9bxLp3BKHLHUfHrl2aYRI=",
+ "stprf": "9yV+6Z2KoQuHJWhDqNZ/ULQtcatXQXVtk2Ei/nBB0aH+3p4NcMj8ONbJNi88sqrsCHR1wArBYnVtwSk+Qwq/6Q==",
+ "vote": "F3ZUaQ+NHy0+Oi39s/ah4riH10kVh9wqdo2E8Vq1Q/s=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "GMD5VUK6MOF5TNKJB7MGB5TRWZLFDG435LGZFCB4GK7TB75EDT3XEJKDSI",
+ "comment": "Wallet10",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "aSAlZpmcmnp/ITcKu+tJqIppOaXy1SrvtzIbLW5ZVxQ=",
+ "stprf": "D9rUr2pfk2rcE2h1BZgyHvzsiHC2Lco6fxTgulZd66A1t2+IY7TiyE+cW/yzyASrcFM1ku6HbDpM+dsn81BTFw==",
+ "vote": "FlVJov8Pt7nkVuaV0g8MWW0KoWX6QnLilA35wrXWXn4=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "QB5WTO545MRUDHPP3H4EM2H7TJDWEB3CBDDZGAVIGIXR56S5EIZDPYLU44",
+ "comment": "Wallet11",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "hNlU23ZwJ0uIasSwuy7urtPVbHVmwUyUSBYumIqxuKY=",
+ "stprf": "3eJyN0gNwYluwBPZQDjjPj+lsSsuyJlRMKm1yDeNfw/lMyNdkUJU59aJdkMuye3qd5Av6wjnxhGiCXq42WUo4Q==",
+ "vote": "wmXN99MQgOuMgeShyJ4NcL6jKQbouKrdUBgYUhJLDfQ=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "FYT3QP5FN27QM53TZD77W4LL6SNS4ULEAIQF6AYPCFEJIUSGAYIXMWCEI4",
+ "comment": "Wallet12",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "FVVkzU8fEt+LhT6Zk1ITNSIvEcY6uVXzKWQbU+WIp+k=",
+ "stprf": "GZ903Jk56IN2uG+OjKl8jfRuuEKReyzuyU23MZxLvDjgrcHfIVhs/z3pgzOrrnajV4jX5PSasI5L6Vz8iuOLfQ==",
+ "vote": "lVPjjTJMZpvLvrhNj8U4D6Emc2vii27ZaeClBk5emIM=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "UHJSEAVFDIOLLT6UYIFRLTEV7CTQPYJEQDAIHRVYAPPSX6UJIMVZR4P7AA",
+ "comment": "Wallet13",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "0P5nHFzLSU+PEcM5r58V71E6Mf4IR+iYZXHbldH60MU=",
+ "stprf": "TEyFNqFjWUmxmjMTDatOP8Aua8P1m6qIRYqwDJsCUOAvUSsekq+2MxZ4MT+Pfjl69GH6SR9siLHvPd9vgL1mlQ==",
+ "vote": "g8+LvQ+DgbIA1aDbUkkNfYKcwFWTO/UK6ljxeBYcp5M=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "6JLQSZQMVQUWPOYLEAOAOHDMC2PI3ZXPT5ADUE6YMBVL36EKQ2DHCP2WQI",
+ "comment": "Wallet14",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "5xOCDWQnV1wQccNxeXOPsvsxFu2rEXslM/JWtPKxeA4=",
+ "stprf": "zemmbbCDp5ekkJr6IrqrGewFrOHltjVBNBoCak4AmUOCqwtDdnCKbQq4NphemyEghfpfXKfYWhFQ44NYqCLTpQ==",
+ "vote": "gjDiA9kbvjXUahdEOjygyw86x1irFZbeyHCw/qHExwg=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "HHOHUW5U2BLQEWUUQLPXZMVZPCJQUJFPZCWHCBYOVHQYON2FWAARD6OMDM",
+ "comment": "Wallet15",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "h84u9vUCoCM85CLMfiCTPfNDO1bPqaafWlqC+PEmKQw=",
+ "stprf": "2MgJ5pgaDhGuo00re4oOsz+TQxCavOglg1Zmrir3K0USmj6grt1NYiJv+51xvt4BIkKG/Pk2E/wUk38jQD3CKA==",
+ "vote": "pbyNjS5u9THxb+y6lsH28T70EPGvBJ7Y89bzCdJAP7E=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "4U3ACMGUA7OBUE4BJR2U6CZQVJOBWQXEIKKG74CYOFHW7RYSLM6WUZWO2U",
+ "comment": "Wallet16",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "LHWqqjw5rp/ar0/EjU0luGknE8+TYASmg8orMF+IAMY=",
+ "stprf": "k2kWdZ6Q3S3D/6mlI3QK7Z9kmCa4kbnJa985NLs5dYb5mkdsVueZVJ0kUnVdmsthCQVq5AYxeYEZiCCF4th2Ew==",
+ "vote": "1koKJypVgKG+8nt4k4dKbHpR64+aBeDi6frjXaOioro=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "W6MAVGKBYY3DFJRNOWOWKOYFWZJWYILPAFB4IENTOT63ZHRYXNTOUXSDCA",
+ "comment": "Wallet17",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "LRfmRYLM91aDXt2llKpnYAihw3iVgk8G3377N/wHhKk=",
+ "stprf": "GZFyAPtAtAo+QE4nR7bZ87G3U6aZOTfFmyufc1kYOd1RP6bGMQUspQ8B8XKC3Y/9mhnyrVHNzhEqqYASqcyGmg==",
+ "vote": "TSgNl6SMI8/0kfD/YMpnUEGNqka660SUa0Fhw+ESh4A=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "SW6ZX2GHABLYFZ2GUFPNX65RAKJYSCSBPZMRDIE5ECKB5Q6NW34EMPWKYI",
+ "comment": "Wallet18",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "LAkKAzZwUqWv5IiLGwILXiuEjo79YmfrBSraUGxBRvw=",
+ "stprf": "xljekFxo4XYiwL+te0DMuXqQigvRVVqW8Y1MSrz+nqTlJ8v07hEkDu2ZPVAdeWb8cL5K4fkI/wYICFbqfzf8Xw==",
+ "vote": "XJ8smxZOlGTzt922nabeReh/CfZ3nPs58/72gXXahq4=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "EOGRZZV2MXCS5WG44XJ77S5YIX5JUWHJ6W6FDBOV5TSROTKMNJEGKNFR6U",
+ "comment": "Wallet19",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "OBgdemFmMNVkkfNNnSgxxaH0VGbLCpAs23qk+/i5pDk=",
+ "stprf": "VS3bpYvHZsV+i4E9Rck1ADqOFCe9mv+xEuB/4AbwL5reIs+XmgeSqeZYtjA44Yhu4nMMrsUq5KEfeJt+0zWw5Q==",
+ "vote": "Rcwn7JRB5X45mXj80+ra2C/1DB9uUS1W/E29+CBI1w8=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "UQCHKHGD7D5N6R5Q6VPQU6XNOPSULM53DGP27ISJINZIH3XKW2LHBYAKXU",
+ "comment": "Wallet2",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "loxF/eE88JkhmCvdMl+DmFsUvLeFuUaJNv1ecvnBDjU=",
+ "stprf": "faSsGeoQ5SWNDgD01rg+zZ3PZWqTMBIl69zyKdP0N/wSYdmO2zApuXEXj8ZEzzV3sG0d7/vdnlf1hTR8awofug==",
+ "vote": "FS57UeflDSSSrlRUlwabMBJRKHGef2Td3dSS148tc8w=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "GMXFVGAXXUEFXSREXM6JUQD4SVLXPSHG7YU5FDU4UEAY5WNANGAHJQOFGQ",
+ "comment": "Wallet20",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "DKoC+wEPaybLRdj+MKm3Qehy0br4i/UOaP+usLztyik=",
+ "stprf": "YqHEbJ6P1HhxLNai9oqBEWnthLcnBZjPuEoGxbM8QE/LWgAFWA8p8ZZ1UpW/l4dHZWq5BKYym3bZGpbtFMjuZg==",
+ "vote": "XtL1RzpArMLvV5yKlbfhKMR0NhNphJMUgkINSQ1yTXY=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "FO2JFV5TECTX5IPM23NHVQ2G7KM2CB463RBZPWYAIYPG2CFX2NHDZKTDHA",
+ "comment": "Wallet21",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "M5G7TXRP7LQYML7RWTJWYKVC63PSNLPT2Q4OML4L4INNYIHCOFMROUALDM",
+ "comment": "Wallet22",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "VNIGFIMNLXWU3HU3PTTTFTNXYUV7BDIR3AEH7F7T736XKBSEBWRIVXXYKU",
+ "comment": "Wallet23",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "GKMGMA4PNAYFXY3ZGN3XYZGT4H6BVMN23AZNN6OW77QZOKTTJMYF54FWP4",
+ "comment": "Wallet24",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "3SDISKXXLMPWTBSABV2NJQ5MLIVKHGDXOGHMSJY4ZQ3AEBYLESF5AJYMNI",
+ "comment": "Wallet3",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "vfxc0A3ROMRug/YbwOFxbJGk+6Skh7rzG5r8CzQL7us=",
+ "stprf": "/bS8MveMPCzKd1Nwl4aFuAfGvwMwj/tLdbCVHBDV1mPUSgvqwgxTXWNZaRF3tX1ietC0DMxfJcb/51P9IblG8Q==",
+ "vote": "XG9EJKvg6GPfhQK+4A9qCYjOtCJrJvL4tBSy9pG7YWs=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "P22FLWO27IIZNALADADVFXM5SI4Y7EQPDDB4WQDJS7OTMD5RGE6VFQCJAE",
+ "comment": "Wallet4",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "jCXkeH6uE6YC3XQJLC53Y1rg0hERpgt8Y/HePQwnSRA=",
+ "stprf": "Pk2Nve+Ngpl8Y2ZoFE0yh1jMdKqKCX7QVcynsl1Kb4mo91S/xJ+HCEPV7KCOE4gapMhYEsfmtHxBwmArGcBkJA==",
+ "vote": "5B9VGMBD/U/AojL6cDv86DuqhhGGGp1ZMxuqDVE8lZA=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "WVJHEL3VPUI5RR4IBOG3VI6OFEJPELDQ7L2RMSZIUFTC3X5G4H5FI2NOGI",
+ "comment": "Wallet5",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "SRTJ0EUewq8ZQKMzygOm4b10HYchjg7xWTHGdKfZYig=",
+ "stprf": "rsvC8L2Ko+4LStTjvVEq88RnKNp8iHff67r3F7YqKTnk4UiwgsDblvsdQoT6BfMJ7f4N8x1ORgw6trGf1VHNjw==",
+ "vote": "lpXIAEYxDEB7hNdvdf+uCt7SktCFIOhejrmHL0mjg34=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "5TM5MD5FZBKNY3SKH7GYPIUAJ7PXIERXFLQ4O4FNL7OLUD6MHNJN4ZAB6Y",
+ "comment": "Wallet6",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "L9oncIYILrFOKQZTtYUT7VhVxs4oJSsalLQDEmKnfHw=",
+ "stprf": "2nBNjnteR7en0E6/9oSy1YiEAS0QDgI0kE2nhJPBo1Ehw//xgcu9GvTaZZBuIMho+56Uol/qCr+HZuOp5Z5bPw==",
+ "vote": "unBd7foxxmX6M3tFJiNX5nD9c30/MNvXkub7/0lUHAc=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "H3RSJSIQYCM5BIBDHHRSGAL2O2NVXV55F4HHTIDZKI2UZIEASLGH6B3STU",
+ "comment": "Wallet7",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "E1pGwlUFBmi1WvSV3NO3UbkbwotKFZlG1bR+gGBmPpo=",
+ "stprf": "sd0gkoXKFsqLqimLDP0DfWBOqc7h9gmHzZEB/o932f7X93KCZhvlDvgOSbgU03LjP8Bn+7H0CpuL/TXi7SoiCg==",
+ "vote": "G4N2xDms0d+MIjaTVubbNYXiK4Ef7kbQAC1praFOHGA=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "NJFY7INZLZVXAHAIEIR2VYTDXMBLAYKHQO3APX4RRTFN2WRUW6JWJ6L6RA",
+ "comment": "Wallet8",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "wGveX8ZZCA2YY4mvgGAkrGw4TuTki9aWf3bOAuLblhw=",
+ "stprf": "llVw21zZ7lsxQ99EPz3FVMUIfy3vkBemly60BZv9HURxjMvqNHC5XIm1slN339IHC54t1WXt5YMlzqGxTNEBjw==",
+ "vote": "mBNaxPrxkhReW3O+yYFdsBdhh642N2lbyQqAWFpkUsc=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "AQ4PRMUOLU26M3XSGGT7UIYWICBV3DCB7FYKSKPAXYBSOFEDBHPFAEOPXE",
+ "comment": "Wallet9",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "BI3DwXKQrnqxulSpUVGGtz4TBKh0RC/qbpSqDdZXFhc=",
+ "stprf": "0XeVxzG/voI2z0Imz79sN5CHI3U0P7ljlTpMLqODE5FaDCkB0s9vkACKiTQfNZCbfQl+20seL/7cyOOhF+OOVA==",
+ "vote": "5zFeVWFVN2huVvdsmYt0vhlFuggwpfY8QFGLcWbkGDg=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ }
+ ],
+ "fees": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "id": "v1",
+ "network": "alphanet",
+ "proto": "alpha1",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}
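Each allocation entry above has the same shape: addr is the base32 account address, comment a human-readable wallet label, and state the initial account record. As a minimal sketch (field meanings inferred from the JSON itself and from go-algorand's account encoding, so treat them as assumptions: algo = balance in microalgos, onl = 1 for online participation, sel/vote/stprf = base64 selection, voting and state proof public keys, voteKD = vote key dilution, voteLst = last round the voting keys are valid), one entry can be decoded like this:

package main

import (
	"encoding/json"
	"fmt"
)

// allocationState models only the state fields visible above; the tag
// meanings are inferred, not taken from the real basics.AccountData type.
type allocationState struct {
	Algo    uint64 `json:"algo"`    // initial balance in microalgos (assumed)
	Onl     int    `json:"onl"`     // 1 = online / participating (assumed)
	Sel     string `json:"sel"`     // selection public key, base64 (assumed)
	StPrf   string `json:"stprf"`   // state proof public key, base64 (assumed)
	Vote    string `json:"vote"`    // voting public key, base64 (assumed)
	VoteKD  uint64 `json:"voteKD"`  // vote key dilution (assumed)
	VoteLst uint64 `json:"voteLst"` // last valid round for the voting keys (assumed)
}

type allocation struct {
	Addr    string          `json:"addr"`
	Comment string          `json:"comment"`
	State   allocationState `json:"state"`
}

func main() {
	raw := `{"comment":"Wallet9","state":{"algo":250000000000000,"onl":1,"voteKD":10000,"voteLst":3000000}}`
	var a allocation
	if err := json.Unmarshal([]byte(raw), &a); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d microalgos, online=%v\n", a.Comment, a.State.Algo, a.State.Onl == 1)
}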
diff --git a/installer/rpm/algorand/algorand.spec b/installer/rpm/algorand/algorand.spec
index c7cd519bb..ef58c0db1 100644
--- a/installer/rpm/algorand/algorand.spec
+++ b/installer/rpm/algorand/algorand.spec
@@ -59,7 +59,7 @@ install -m 644 ${REPO_DIR}/installer/rpm/algorand/algorand.repo %{buildroot}/usr
mkdir -p %{buildroot}/var/lib/algorand/genesis
if [ "%{RELEASE_GENESIS_PROCESS}" != "x" ]; then
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p %{buildroot}/var/lib/algorand/genesis/${dir}
cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json %{buildroot}/var/lib/algorand/genesis/${dir}/genesis.json
@@ -89,6 +89,7 @@ fi
/var/lib/algorand/genesis/testnet/genesis.json
/var/lib/algorand/genesis/betanet/genesis.json
/var/lib/algorand/genesis/mainnet/genesis.json
+ /var/lib/algorand/genesis/alphanet/genesis.json
%endif
/lib/systemd/system/algorand.service
/lib/systemd/system/algorand@.service
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 0542b9071..5a92ab919 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -302,7 +302,7 @@ type compactAccountDeltas struct {
}
// onlineAccountDelta track all changes of account state within a range,
-// used in conjunction wih compactOnlineAccountDeltas to group and represent per-account changes.
+// used in conjunction with compactOnlineAccountDeltas to group and represent per-account changes.
// oldAcct represents the "old" state of the account in the DB, and compared against newAcct[0]
// to determine if the acct became online or went offline.
type onlineAccountDelta struct {
@@ -967,13 +967,17 @@ func (a *compactOnlineAccountDeltas) updateOld(idx int, old persistedOnlineAccou
// writeCatchpointStagingBalances inserts all the account balances in the provided array into the catchpoint balance staging table catchpointbalances.
func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []normalizedAccountBalance) error {
+ selectAcctStmt, err := tx.PrepareContext(ctx, "SELECT rowid FROM catchpointbalances WHERE address = ?")
+ if err != nil {
+ return err
+ }
+
insertAcctStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointbalances(address, normalizedonlinebalance, data) VALUES(?, ?, ?)")
if err != nil {
return err
}
- var insertRscStmt *sql.Stmt
- insertRscStmt, err = tx.PrepareContext(ctx, "INSERT INTO catchpointresources(addrid, aidx, data) VALUES(?, ?, ?)")
+ insertRscStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointresources(addrid, aidx, data) VALUES(?, ?, ?)")
if err != nil {
return err
}
@@ -982,27 +986,41 @@ func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []norm
var rowID int64
for _, balance := range bals {
result, err = insertAcctStmt.ExecContext(ctx, balance.address[:], balance.normalizedBalance, balance.encodedAccountData)
- if err != nil {
- return err
- }
- aff, err := result.RowsAffected()
- if err != nil {
- return err
- }
- if aff != 1 {
- return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff)
- }
- rowID, err = result.LastInsertId()
- if err != nil {
- return err
+ if err == nil {
+ var aff int64
+ aff, err = result.RowsAffected()
+ if err != nil {
+ return err
+ }
+ if aff != 1 {
+ return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff)
+ }
+ rowID, err = result.LastInsertId()
+ if err != nil {
+ return err
+ }
+ } else {
+ var sqliteErr sqlite3.Error
+ if errors.As(err, &sqliteErr) && sqliteErr.Code == sqlite3.ErrConstraint && sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
+ // the address already exists: an earlier record of this overflowed account inserted the row; look up its addrid
+ err = selectAcctStmt.QueryRowContext(ctx, balance.address[:]).Scan(&rowID)
+ if err != nil {
+ return err
+ }
+ } else {
+ return err
+ }
}
+
// write resources
for aidx := range balance.resources {
- result, err := insertRscStmt.ExecContext(ctx, rowID, aidx, balance.encodedResources[aidx])
+ var result sql.Result
+ result, err = insertRscStmt.ExecContext(ctx, rowID, aidx, balance.encodedResources[aidx])
if err != nil {
return err
}
- aff, err := result.RowsAffected()
+ var aff int64
+ aff, err = result.RowsAffected()
if err != nil {
return err
}
@@ -1593,7 +1611,7 @@ func (bo *baseOnlineAccountData) SetCoreAccountData(ad *ledgercore.AccountData)
type resourceFlags uint8
const (
- resourceFlagsHolding resourceFlags = 0 //nolint:deadcode,varcheck
+ resourceFlagsHolding resourceFlags = 0
resourceFlagsNotHolding resourceFlags = 1
resourceFlagsOwnership resourceFlags = 2
resourceFlagsEmptyAsset resourceFlags = 4
@@ -3965,16 +3983,26 @@ func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) {
return content, nil
}
+// catchpointAccountResourceCounter keeps track of the resources processed for the current account
+type catchpointAccountResourceCounter struct {
+ totalAppParams uint64
+ totalAppLocalStates uint64
+ totalAssetParams uint64
+ totalAssets uint64
+}
+
// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
type encodedAccountsBatchIter struct {
- accountsRows *sql.Rows
- resourcesRows *sql.Rows
- nextRow pendingRow
+ accountsRows *sql.Rows
+ resourcesRows *sql.Rows
+ nextBaseRow pendingBaseRow
+ nextResourceRow pendingResourceRow
+ acctResCnt catchpointAccountResourceCounter
}
// Next returns an array containing the account data, in the same way it appears in the database,
// returning accountCount accounts' data at a time.
-func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int) (bals []encodedBalanceRecordV6, err error) {
+func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) (bals []encodedBalanceRecordV6, numAccountsProcessed uint64, err error) {
if iterator.accountsRows == nil {
iterator.accountsRows, err = tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
if err != nil {
@@ -4000,9 +4028,11 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx,
return nil
}
- var totalAppParams, totalAppLocalStates, totalAssetParams, totalAssets uint64
+ var totalResources int
+
// emptyCount := 0
- resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error {
+ resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error {
+
emptyBaseAcct := baseAcct.TotalAppParams == 0 && baseAcct.TotalAppLocalStates == 0 && baseAcct.TotalAssetParams == 0 && baseAcct.TotalAssets == 0
if !emptyBaseAcct && resData != nil {
if encodedRecord.Resources == nil {
@@ -4010,47 +4040,56 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx,
}
encodedRecord.Resources[uint64(cidx)] = encodedResourceData
if resData.IsApp() && resData.IsOwning() {
- totalAppParams++
+ iterator.acctResCnt.totalAppParams++
}
if resData.IsApp() && resData.IsHolding() {
- totalAppLocalStates++
+ iterator.acctResCnt.totalAppLocalStates++
}
if resData.IsAsset() && resData.IsOwning() {
- totalAssetParams++
+ iterator.acctResCnt.totalAssetParams++
}
if resData.IsAsset() && resData.IsHolding() {
- totalAssets++
+ iterator.acctResCnt.totalAssets++
}
-
+ totalResources++
}
- if baseAcct.TotalAppParams == totalAppParams &&
- baseAcct.TotalAppLocalStates == totalAppLocalStates &&
- baseAcct.TotalAssetParams == totalAssetParams &&
- baseAcct.TotalAssets == totalAssets {
+ if baseAcct.TotalAppParams == iterator.acctResCnt.totalAppParams &&
+ baseAcct.TotalAppLocalStates == iterator.acctResCnt.totalAppLocalStates &&
+ baseAcct.TotalAssetParams == iterator.acctResCnt.totalAssetParams &&
+ baseAcct.TotalAssets == iterator.acctResCnt.totalAssets {
+ encodedRecord.ExpectingMoreEntries = false
bals = append(bals, encodedRecord)
- totalAppParams = 0
- totalAppLocalStates = 0
- totalAssetParams = 0
- totalAssets = 0
+ numAccountsProcessed++
+
+ iterator.acctResCnt = catchpointAccountResourceCounter{}
+
+ return nil
+ }
+
+ // max resources per chunk reached, stop iterating.
+ if lastResource {
+ encodedRecord.ExpectingMoreEntries = true
+ bals = append(bals, encodedRecord)
+ encodedRecord.Resources = nil
}
return nil
}
- _, iterator.nextRow, err = processAllBaseAccountRecords(
+ _, iterator.nextBaseRow, iterator.nextResourceRow, err = processAllBaseAccountRecords(
iterator.accountsRows, iterator.resourcesRows,
baseCb, resCb,
- iterator.nextRow, accountCount,
+ iterator.nextBaseRow, iterator.nextResourceRow, accountCount, resourceCount,
)
if err != nil {
iterator.Close()
return
}
- if len(bals) == accountCount {
+ if len(bals) == accountCount || totalResources == resourceCount {
// we're done with this iteration.
return
}
@@ -4106,27 +4145,37 @@ const (
// orderedAccountsIter allows us to iterate over the accounts addresses in the order of the account hashes.
type orderedAccountsIter struct {
- step orderedAccountsIterStep
- accountBaseRows *sql.Rows
- hashesRows *sql.Rows
- resourcesRows *sql.Rows
- tx *sql.Tx
- pendingRow pendingRow
- accountCount int
- insertStmt *sql.Stmt
+ step orderedAccountsIterStep
+ accountBaseRows *sql.Rows
+ hashesRows *sql.Rows
+ resourcesRows *sql.Rows
+ tx *sql.Tx
+ pendingBaseRow pendingBaseRow
+ pendingResourceRow pendingResourceRow
+ accountCount int
+ resourceCount int
+ insertStmt *sql.Stmt
}
// makeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
// only a single iterator can be active at a time.
-func makeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
+func makeOrderedAccountsIter(tx *sql.Tx, accountCount int, resourceCount int) *orderedAccountsIter {
return &orderedAccountsIter{
- tx: tx,
- accountCount: accountCount,
- step: oaiStepStartup,
+ tx: tx,
+ accountCount: accountCount,
+ resourceCount: resourceCount,
+ step: oaiStepStartup,
}
}
-type pendingRow struct {
+type pendingBaseRow struct {
+ addr basics.Address
+ rowid int64
+ accountData *baseAccountData
+ encodedAccountData []byte
+}
+
+type pendingResourceRow struct {
addrid int64
aidx basics.CreatableIndex
buf []byte
@@ -4134,10 +4183,11 @@ type pendingRow struct {
func processAllResources(
resRows *sql.Rows,
- addr basics.Address, accountData *baseAccountData, acctRowid int64, pr pendingRow,
- callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error,
-) (pendingRow, error) {
+ addr basics.Address, accountData *baseAccountData, acctRowid int64, pr pendingResourceRow, resourceCount int,
+ callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error,
+) (pendingResourceRow, int, error) {
var err error
+ count := 0
// Declare variables outside of the loop to prevent allocations per iteration.
// At least resData is resolved as "escaped" because of passing it by a pointer to protocol.Decode()
@@ -4152,57 +4202,63 @@ func processAllResources(
// in this case addrid = 3 after processing resources from 1, but acctRowid = 2
// and we need to skip accounts without resources
if pr.addrid > acctRowid {
- err = callback(addr, 0, nil, nil)
- return pr, err
+ err = callback(addr, 0, nil, nil, false)
+ return pr, count, err
}
if pr.addrid < acctRowid {
err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", pr.addrid, acctRowid)
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
addrid = pr.addrid
buf = pr.buf
aidx = pr.aidx
- pr = pendingRow{}
+ pr = pendingResourceRow{}
} else {
if !resRows.Next() {
- err = callback(addr, 0, nil, nil)
+ err = callback(addr, 0, nil, nil, false)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
break
}
err = resRows.Scan(&addrid, &aidx, &buf)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
if addrid < acctRowid {
err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", addrid, acctRowid)
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
} else if addrid > acctRowid {
- err = callback(addr, 0, nil, nil)
- return pendingRow{addrid, aidx, buf}, err
+ err = callback(addr, 0, nil, nil, false)
+ return pendingResourceRow{addrid, aidx, buf}, count, err
}
}
resData = resourcesData{}
err = protocol.Decode(buf, &resData)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
+ }
+ count++
+ if resourceCount > 0 && count == resourceCount {
+ // last resource to be included in chunk
+ err := callback(addr, aidx, &resData, buf, true)
+ return pendingResourceRow{}, count, err
}
- err = callback(addr, aidx, &resData, buf)
+ err = callback(addr, aidx, &resData, buf, false)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
}
- return pendingRow{}, nil
+ return pendingResourceRow{}, count, nil
}
func processAllBaseAccountRecords(
baseRows *sql.Rows,
resRows *sql.Rows,
baseCb func(addr basics.Address, rowid int64, accountData *baseAccountData, encodedAccountData []byte) error,
- resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error,
- pending pendingRow, accountCount int,
-) (int, pendingRow, error) {
+ resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error,
+ pendingBase pendingBaseRow, pendingResource pendingResourceRow, accountCount int, resourceCount int,
+) (int, pendingBaseRow, pendingResourceRow, error) {
var addr basics.Address
var prevAddr basics.Address
var err error
@@ -4212,44 +4268,70 @@ func processAllBaseAccountRecords(
var addrbuf []byte
var buf []byte
var rowid int64
- for baseRows.Next() {
- err = baseRows.Scan(&rowid, &addrbuf, &buf)
- if err != nil {
- return 0, pendingRow{}, err
- }
+ for {
+ if pendingBase.rowid != 0 {
+ addr = pendingBase.addr
+ rowid = pendingBase.rowid
+ accountData = *pendingBase.accountData
+ buf = pendingBase.encodedAccountData
+ pendingBase = pendingBaseRow{}
+ } else {
+ if !baseRows.Next() {
+ break
+ }
- if len(addrbuf) != len(addr) {
- err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
- return 0, pendingRow{}, err
- }
+ err = baseRows.Scan(&rowid, &addrbuf, &buf)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
- copy(addr[:], addrbuf)
+ if len(addrbuf) != len(addr) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
- accountData = baseAccountData{}
- err = protocol.Decode(buf, &accountData)
- if err != nil {
- return 0, pendingRow{}, err
+ copy(addr[:], addrbuf)
+
+ accountData = baseAccountData{}
+ err = protocol.Decode(buf, &accountData)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
}
+
err = baseCb(addr, rowid, &accountData, buf)
if err != nil {
- return 0, pendingRow{}, err
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
}
- pending, err = processAllResources(resRows, addr, &accountData, rowid, pending, resCb)
+ var resourcesProcessed int
+ pendingResource, resourcesProcessed, err = processAllResources(resRows, addr, &accountData, rowid, pendingResource, resourceCount, resCb)
if err != nil {
err = fmt.Errorf("failed to gather resources for account %v, addrid %d, prev address %v : %w", addr, rowid, prevAddr, err)
- return 0, pendingRow{}, err
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
}
+ if resourcesProcessed == resourceCount {
+ // we're done with this iteration.
+ pendingBase := pendingBaseRow{
+ addr: addr,
+ rowid: rowid,
+ accountData: &accountData,
+ encodedAccountData: buf,
+ }
+ return count, pendingBase, pendingResource, nil
+ }
+ resourceCount -= resourcesProcessed
+
count++
if accountCount > 0 && count == accountCount {
// we're done with this iteration.
- return count, pending, nil
+ return count, pendingBaseRow{}, pendingResource, nil
}
prevAddr = addr
}
- return count, pending, nil
+ return count, pendingBaseRow{}, pendingResource, nil
}
// loadFullAccount converts baseAccountData into basics.AccountData and loads all resources as needed
@@ -4458,7 +4540,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd
return nil
}
- resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error {
+ resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error {
var err error
if resData != nil {
var ctype basics.CreatableType
@@ -4477,10 +4559,10 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd
}
count := 0
- count, iterator.pendingRow, err = processAllBaseAccountRecords(
+ count, iterator.pendingBaseRow, iterator.pendingResourceRow, err = processAllBaseAccountRecords(
iterator.accountBaseRows, iterator.resourcesRows,
baseCb, resCb,
- iterator.pendingRow, iterator.accountCount,
+ iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, iterator.resourceCount,
)
if err != nil {
iterator.Close(ctx)
@@ -4679,7 +4761,7 @@ func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
// before compares the update rounds of two persistedOnlineAccountData and determines if the current
// persistedOnlineAccountData happened before the other.
func (pac *persistedOnlineAccountData) before(other *persistedOnlineAccountData) bool {
- return pac.round < other.round
+ return pac.updRound < other.updRound
}
// txTailRoundLease is used as part of txTailRound for storing
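One behavioral note on the accountdb.go changes above: because a resource-heavy account can now span multiple catchpoint records, writeCatchpointStagingBalances may try to insert the same address twice, and the new code treats the resulting UNIQUE-constraint violation as "row already exists, reuse its rowid" so the extra resources attach to the same account row. A minimal sketch of that fallback pattern, assuming the github.com/mattn/go-sqlite3 driver (statement text and names here are illustrative, not the real schema):

package sketch

import (
	"context"
	"database/sql"
	"errors"

	"github.com/mattn/go-sqlite3"
)

// insertOrFindRow inserts the account row, and on a UNIQUE-constraint
// violation (the row was written by an earlier record of the same account)
// falls back to fetching the existing rowid.
func insertOrFindRow(ctx context.Context, ins, sel *sql.Stmt, addr, data []byte) (int64, error) {
	res, err := ins.ExecContext(ctx, addr, data)
	if err == nil {
		return res.LastInsertId()
	}
	var sqliteErr sqlite3.Error
	if errors.As(err, &sqliteErr) &&
		sqliteErr.Code == sqlite3.ErrConstraint &&
		sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
		var rowID int64
		err = sel.QueryRowContext(ctx, addr).Scan(&rowID)
		return rowID, err
	}
	return 0, err
}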
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index cb09d7c82..03af7908d 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -634,7 +634,6 @@ func (ao *onlineAccounts) lookupOnlineAccountData(rnd basics.Round, addr basics.
}
// the round number cannot be found in deltas, it is in history
inHistory = true
- err = nil
}
paramsOffset, err = ao.roundParamsOffset(rnd)
if err != nil {
@@ -764,7 +763,6 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou
}
// the round number cannot be found in deltas, it is in history
inMemory = false
- err = nil
}
modifiedAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
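The two err = nil deletions above remove dead stores rather than changing behavior: in the first hunk err is unconditionally reassigned by ao.roundParamsOffset(rnd) on the very next line, and the second hunk follows the same shape, so the explicit reset was never observable.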
diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go
index 8ebd160bc..67871c23e 100644
--- a/ledger/acctonline_test.go
+++ b/ledger/acctonline_test.go
@@ -1111,6 +1111,113 @@ func TestAcctOnlineCacheDBSync(t *testing.T) {
})
}
+// TestAcctOnlineBaseAccountCache checks data correctness for the case where
+// some accounts go online and then offline within the same commit range,
+// and then online again in the next range with the same voting data.
+func TestAcctOnlineBaseAccountCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const seedLookback = 2
+ const seedInterval = 3
+ const maxBalLookback = 2 * seedLookback * seedInterval
+
+ const numAccts = 5 // does not matter, some number of accounts
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ var addrA basics.Address
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: ledgertesting.RandomOnlineAccountData(0),
+ }
+ if i == 0 {
+ addrA = allAccts[i].Addr
+ allAccts[i].AccountData.Status = basics.Offline
+ allAccts[i].AccountData.VoteLastValid = 0
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctOnlineBaseAccountCache")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ protoParams.SeedLookback = seedLookback
+ protoParams.SeedRefreshInterval = seedInterval
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+ conf := config.GetDefaultLocal()
+ conf.MaxAcctLookback = maxBalLookback
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ accounts := genesisAccts
+
+ acctDatas := [3]ledgercore.AccountData{
+ {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online}, VotingData: ledgercore.VotingData{VoteLastValid: basics.Round(1000 + maxBalLookback)}},
+ {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}},
+ {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online}, VotingData: ledgercore.VotingData{VoteLastValid: basics.Round(1000 + maxBalLookback)}},
+ }
+ // set online, offline, online
+ for i := 1; i <= 3; i++ {
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(addrA, acctDatas[i-1])
+ base := accounts[i-1]
+ newAccts := applyPartialDeltas(base, updates)
+ accounts = append(accounts, newAccts)
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ }
+
+ // add maxBalLookback + 2 empty blocks; the next commit will flush the first two rounds
+ for i := 4; i <= maxBalLookback+2; i++ {
+ var updates ledgercore.AccountDeltas
+ base := accounts[i-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ accounts = append(accounts, base)
+ }
+
+ rnd := maxBalLookback + 2
+ commitSync(t, oa, ml, basics.Round(rnd))
+ poad, has := oa.baseOnlineAccounts.read(addrA)
+ require.True(t, has)
+ require.Empty(t, poad.accountData)
+
+ data, err := oa.lookupOnlineAccountData(2, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+
+ // add one more block; the next commit will flush the third round
+ {
+ i := rnd + 1
+ var updates ledgercore.AccountDeltas
+ base := accounts[i-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ commitSync(t, oa, ml, basics.Round(i))
+ }
+
+ poad, has = oa.baseOnlineAccounts.read(addrA)
+ require.True(t, has)
+ require.NotEmpty(t, poad.accountData)
+
+ data, err = oa.lookupOnlineAccountData(basics.Round(3), addrA)
+ require.NoError(t, err)
+ require.NotEmpty(t, data.VotingData.VoteLastValid)
+
+ data, err = oa.lookupOnlineAccountData(basics.Round(rnd+1), addrA)
+ require.NoError(t, err)
+ require.NotEmpty(t, data.VotingData.VoteLastValid)
+}
+
func TestAcctOnlineVotersLongerHistory(t *testing.T) {
partitiontest.PartitionTest(t)
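A quick sanity check on the constants in TestAcctOnlineBaseAccountCache: maxBalLookback = 2 * seedLookback * seedInterval = 2 * 2 * 3 = 12, so the test builds blocks through round maxBalLookback + 2 = 14 before the first commitSync. That commit flushes rounds 1 and 2, where addrA's latest state is offline (hence the empty VoteLastValid at round 2); the single extra block afterwards lets the next commit flush round 3, where addrA is online again with the same voting data.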
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 852df6df4..db2a99126 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -1041,14 +1041,17 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
// a separate transaction here, and directly use a prepared SQL query
// against the database.
persistedData, err = au.accountsq.lookupResources(addr, aidx, ctype)
+ if err != nil {
+ return ledgercore.AccountResource{}, basics.Round(0), err
+ }
if persistedData.round == currentDbRound {
if persistedData.addrid != 0 {
// if we read actual data return it
au.baseResources.writePending(persistedData, addr)
- return persistedData.AccountResource(), rnd, err
+ return persistedData.AccountResource(), rnd, nil
}
// otherwise return empty
- return ledgercore.AccountResource{}, rnd, err
+ return ledgercore.AccountResource{}, rnd, nil
}
if synchronized {
if persistedData.round < currentDbRound {
@@ -1140,19 +1143,22 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
// a separate transaction here, and directly use a prepared SQL query
// against the database.
persistedData, err = au.accountsq.lookup(addr)
+ if err != nil {
+ return ledgercore.AccountData{}, basics.Round(0), "", 0, err
+ }
if persistedData.round == currentDbRound {
if persistedData.rowid != 0 {
// if we read actual data return it
au.baseAccounts.writePending(persistedData)
- return persistedData.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, err
+ return persistedData.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil
}
// otherwise return empty
- return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, err
+ return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, nil
}
if synchronized {
if persistedData.round < currentDbRound {
au.log.Errorf("accountUpdates.lookupWithoutRewards: database round %d is behind in-memory round %d", persistedData.round, currentDbRound)
- return ledgercore.AccountData{}, basics.Round(0), rewardsVersion, rewardsLevel, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ return ledgercore.AccountData{}, basics.Round(0), "", 0, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
}
au.accountsMu.RLock()
needUnlock = true
@@ -1162,7 +1168,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
} else {
// in non-sync mode, we don't wait since we already assume that we're synchronized.
au.log.Errorf("accountUpdates.lookupWithoutRewards: database round %d mismatching in-memory round %d", persistedData.round, currentDbRound)
- return ledgercore.AccountData{}, basics.Round(0), rewardsVersion, rewardsLevel, &MismatchingDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ return ledgercore.AccountData{}, basics.Round(0), "", 0, &MismatchingDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
}
}
}
@@ -1219,9 +1225,11 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
}
// Check the database
creator, ok, dbRound, err = au.accountsq.lookupCreator(cidx, ctype)
-
+ if err != nil {
+ return basics.Address{}, false, err
+ }
if dbRound == currentDbRound {
- return
+ return creator, ok, nil
}
if synchronized {
if dbRound < currentDbRound {
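A pattern note on the acctupdates.go hunks above: lookupResource, lookupWithoutRewards, and getCreatorForRound now fail fast, returning zero values together with the error as soon as the database lookup fails, instead of threading a possibly non-nil err through the success-path returns; the stale- and mismatching-round branches likewise return zeroed rewardsVersion and rewardsLevel rather than values computed before the error was detected.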
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 740ac91d0..edd44549e 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -22,7 +22,6 @@ import (
"database/sql"
"errors"
"fmt"
- "io/ioutil"
"os"
"runtime"
"strings"
@@ -121,12 +120,14 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
dblogger := logging.TestingLog(t)
dblogger.SetLevel(logging.Info)
newLedgerTracker := &mockLedgerForTracker{
- inMemory: false,
- log: dblogger,
- blocks: make([]blockEntry, len(ml.blocks)),
- deltas: make([]ledgercore.StateDelta, len(ml.deltas)),
- accts: make(map[basics.Address]basics.AccountData),
- filename: fn,
+ inMemory: false,
+ log: dblogger,
+ blocks: make([]blockEntry, len(ml.blocks)),
+ deltas: make([]ledgercore.StateDelta, len(ml.deltas)),
+ accts: make(map[basics.Address]basics.AccountData),
+ filename: fn,
+ consensusParams: ml.consensusParams,
+ consensusVersion: ml.consensusVersion,
}
for k, v := range ml.accts {
newLedgerTracker.accts[k] = v
@@ -138,9 +139,9 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
ml.dbs.Wdb.Vacuum(context.Background())
// copy the database files.
for _, ext := range []string{"", "-shm", "-wal"} {
- bytes, err := ioutil.ReadFile(ml.filename + ext)
+ bytes, err := os.ReadFile(ml.filename + ext)
require.NoError(t, err)
- err = ioutil.WriteFile(newLedgerTracker.filename+ext, bytes, 0600)
+ err = os.WriteFile(newLedgerTracker.filename+ext, bytes, 0600)
require.NoError(t, err)
}
dbs, err := db.OpenPair(newLedgerTracker.filename, false)
@@ -290,7 +291,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base
require.Error(t, err)
require.Equal(t, basics.Round(0), validThrough)
- if base > 0 {
+ if base > 0 && base >= basics.Round(ao.maxBalLookback()) {
_, err := ao.onlineTotals(base - basics.Round(ao.maxBalLookback()))
require.Error(t, err)
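The ioutil replacements sprinkled through these test files follow the Go 1.16 deprecation of io/ioutil; os.ReadFile and os.WriteFile are drop-in equivalents with identical signatures, as this self-contained example shows:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.WriteFile replaces ioutil.WriteFile: same arguments, same behavior.
	if err := os.WriteFile("example.txt", []byte("hello"), 0600); err != nil {
		panic(err)
	}
	// os.ReadFile replaces ioutil.ReadFile.
	data, err := os.ReadFile("example.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // prints: hello
}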
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index cb6dc4ae5..6e61ee0ab 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -143,6 +143,7 @@ func TestArchival(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -720,6 +721,7 @@ func TestArchivalFromNonArchival(t *testing.T) {
cfg.Archival = false
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbPrefix, inMem, genesisInitState, cfg)
require.NoError(t, err)
blk := genesisInitState.Block
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index 0cfc9089d..5cfe0f3c4 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -1057,7 +1057,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
err := ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx)
+ catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
if err != nil {
return
}
@@ -1459,7 +1459,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
if rootHash.IsZero() {
ct.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize, DefaultMaxResourcesPerChunk)
defer accountBuilderIt.Close(ctx)
startTrieBuildTime := time.Now()
trieHashCount := 0
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 2d7bebcd5..a12e6fa9a 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -21,7 +21,6 @@ import (
"database/sql"
"errors"
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -102,7 +101,7 @@ func TestGetCatchpointStream(t *testing.T) {
for i := 0; i < filesToCreate; i++ {
fileName := filepath.Join(CatchpointDirName, fmt.Sprintf("%d.catchpoint", i))
data := []byte{byte(i), byte(i + 1), byte(i + 2)}
- err = ioutil.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666)
+ err = os.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666)
require.NoError(t, err)
// Store the catchpoint into the database
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 7b7f07d2f..6f1e11dfe 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -35,6 +35,10 @@ const (
// BalancesPerCatchpointFileChunk defines the number of accounts that would be stored in each chunk in the catchpoint file.
// note that the last chunk would typically be less than this number.
BalancesPerCatchpointFileChunk = 512
+
+ // DefaultMaxResourcesPerChunk defines the max number of resources that go in a single chunk
+ // 300000 resources * 300B/resource => roughly max 100MB per chunk
+ DefaultMaxResourcesPerChunk = 300000
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -42,19 +46,21 @@ const (
// the writing is complete. It might take multiple steps until the operation is over, and the caller
// has the option of throttling the CPU utilization in between the calls.
type catchpointWriter struct {
- ctx context.Context
- tx *sql.Tx
- filePath string
- totalAccounts uint64
- totalChunks uint64
- file *os.File
- tar *tar.Writer
- compressor io.WriteCloser
- balancesChunk catchpointFileBalancesChunkV6
- balancesChunkNum uint64
- writtenBytes int64
- biggestChunkLen uint64
- accountsIterator encodedAccountsBatchIter
+ ctx context.Context
+ tx *sql.Tx
+ filePath string
+ totalAccounts uint64
+ totalChunks uint64
+ file *os.File
+ tar *tar.Writer
+ compressor io.WriteCloser
+ balancesChunk catchpointFileBalancesChunkV6
+ balancesChunkNum uint64
+ numAccountsProcessed uint64
+ writtenBytes int64
+ biggestChunkLen uint64
+ accountsIterator encodedAccountsBatchIter
+ maxResourcesPerChunk int
}
type encodedBalanceRecordV5 struct {
@@ -79,14 +85,18 @@ type encodedBalanceRecordV6 struct {
Address basics.Address `codec:"a,allocbound=crypto.DigestSize"`
AccountData msgp.Raw `codec:"b,allocbound=basics.MaxEncodedAccountDataSize"`
Resources map[uint64]msgp.Raw `codec:"c,allocbound=basics.MaxEncodedAccountDataSize"`
+
+ // flag indicating whether there are more records for the same account coming up
+ ExpectingMoreEntries bool `codec:"e"`
}
type catchpointFileBalancesChunkV6 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
+ numAccounts uint64
}
-func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx) (*catchpointWriter, error) {
+func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxResourcesPerChunk int) (*catchpointWriter, error) {
totalAccounts, err := totalAccounts(ctx, tx)
if err != nil {
return nil, err
@@ -107,14 +117,15 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx) (*ca
tar := tar.NewWriter(compressor)
res := &catchpointWriter{
- ctx: ctx,
- tx: tx,
- filePath: filePath,
- totalAccounts: totalAccounts,
- totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
- file: file,
- compressor: compressor,
- tar: tar,
+ ctx: ctx,
+ tx: tx,
+ filePath: filePath,
+ totalAccounts: totalAccounts,
+ totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
+ file: file,
+ compressor: compressor,
+ tar: tar,
+ maxResourcesPerChunk: maxResourcesPerChunk,
}
return res, nil
}
@@ -135,7 +146,7 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
writerRequest := make(chan catchpointFileBalancesChunkV6, 1)
writerResponse := make(chan error, 2)
- go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum)
+ go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum, cw.numAccountsProcessed)
defer func() {
close(writerRequest)
// wait for the writerResponse to close.
@@ -180,9 +191,10 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
// write to disk.
if len(cw.balancesChunk.Balances) > 0 {
+ cw.numAccountsProcessed += cw.balancesChunk.numAccounts
cw.balancesChunkNum++
writerRequest <- cw.balancesChunk
- if len(cw.balancesChunk.Balances) < BalancesPerCatchpointFileChunk || cw.balancesChunkNum == cw.totalChunks {
+ if cw.numAccountsProcessed == cw.totalAccounts {
cw.accountsIterator.Close()
// if we're done, wait for the writer to complete its writing.
err, opened := <-writerResponse
@@ -199,11 +211,13 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
}
}
-func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChunkV6, response chan error, initialBalancesChunkNum uint64) {
+func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChunkV6, response chan error, initialBalancesChunkNum uint64, initialNumAccounts uint64) {
defer close(response)
balancesChunkNum := initialBalancesChunkNum
+ numAccountsProcessed := initialNumAccounts
for bc := range balances {
balancesChunkNum++
+ numAccountsProcessed += bc.numAccounts
if len(bc.Balances) == 0 {
break
}
@@ -226,8 +240,7 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
if chunkLen := uint64(len(encodedChunk)); cw.biggestChunkLen < chunkLen {
cw.biggestChunkLen = chunkLen
}
-
- if len(bc.Balances) < BalancesPerCatchpointFileChunk || balancesChunkNum == cw.totalChunks {
+ if numAccountsProcessed == cw.totalAccounts {
cw.tar.Close()
cw.compressor.Close()
cw.file.Close()
@@ -244,7 +257,7 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
}
func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (err error) {
- cw.balancesChunk.Balances, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk)
+ cw.balancesChunk.Balances, cw.balancesChunk.numAccounts, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
return
}
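The termination rule in catchpointwriter.go changed along with the chunking: a chunk can now be short because the maxResourcesPerChunk budget ran out, not only because the account table was exhausted, so both WriteStep and asyncWriter decide "done" by comparing accounts processed against totalAccounts instead of looking at chunk fill level. The ExpectingMoreEntries flag is what lets a single account span several records. A toy sketch of that splitting (names loosely follow encodedBalanceRecordV6; this is not the real encoding):

package main

import "fmt"

// record mirrors the role of encodedBalanceRecordV6: the same account may
// span several records, all but the last flagged ExpectingMoreEntries.
type record struct {
	Account              string
	Resources            []int
	ExpectingMoreEntries bool
}

// splitAccount caps the resources carried by one record, emitting
// continuation records for the overflow.
func splitAccount(acct string, resources []int, maxResourcesPerChunk int) []record {
	var out []record
	for len(resources) > maxResourcesPerChunk {
		out = append(out, record{acct, resources[:maxResourcesPerChunk], true})
		resources = resources[maxResourcesPerChunk:]
	}
	return append(out, record{acct, resources, false})
}

func main() {
	for _, r := range splitAccount("WalletX", []int{1, 2, 3, 4, 5, 6, 7}, 3) {
		fmt.Printf("%s: %v expectingMore=%v\n", r.Account, r.Resources, r.ExpectingMoreEntries)
	}
}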
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index fa1819d97..e5765e506 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -24,7 +24,7 @@ import (
"database/sql"
"fmt"
"io"
- "io/ioutil"
+ "os"
"path/filepath"
"runtime"
"testing"
@@ -199,7 +199,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
config.Consensus[testProtocolVersion] = protoParams
- temporaryDirectroy := t.TempDir()
+ temporaryDirectory := t.TempDir()
defer func() {
delete(config.Consensus, testProtocolVersion)
}()
@@ -215,11 +215,11 @@ func TestBasicCatchpointWriter(t *testing.T) {
err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
- fileName := filepath.Join(temporaryDirectroy, "15.data")
+ fileName := filepath.Join(temporaryDirectory, "15.data")
readDb := ml.trackerDB().Rdb
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), fileName, tx)
+ writer, err := makeCatchpointWriter(context.Background(), fileName, tx, DefaultMaxResourcesPerChunk)
if err != nil {
return err
}
@@ -235,7 +235,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// load the file from disk.
- fileContent, err := ioutil.ReadFile(fileName)
+ fileContent, err := os.ReadFile(fileName)
require.NoError(t, err)
compressorReader, err := catchpointStage1Decoder(bytes.NewBuffer(fileContent))
require.NoError(t, err)
@@ -306,7 +306,314 @@ func TestFullCatchpointWriter(t *testing.T) {
var accountsRnd basics.Round
var totals ledgercore.AccountTotals
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx)
+ writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ for {
+ more, err := writer.WriteStep(context.Background())
+ require.NoError(t, err)
+ if !more {
+ break
+ }
+ }
+ totalAccounts = writer.GetTotalAccounts()
+ totalChunks = writer.GetTotalChunks()
+ biggestChunkLen = writer.GetBiggestChunkLen()
+ accountsRnd, err = accountsRound(tx)
+ if err != nil {
+ return
+ }
+ totals, err = accountsTotals(ctx, tx, false)
+ return
+ })
+ require.NoError(t, err)
+ blocksRound := accountsRnd + 1
+ blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
+ catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
+ catchpointFileHeader := CatchpointFileHeader{
+ Version: CatchpointFileVersionV6,
+ BalancesRound: accountsRnd,
+ BlocksRound: blocksRound,
+ Totals: totals,
+ TotalAccounts: totalAccounts,
+ TotalChunks: totalChunks,
+ Catchpoint: catchpointLabel,
+ BlockHeaderDigest: blockHeaderDigest,
+ }
+ err = repackCatchpoint(
+ context.Background(), catchpointFileHeader, biggestChunkLen,
+ catchpointDataFilePath, catchpointFilePath)
+ require.NoError(t, err)
+
+ // create a ledger.
+ var initState ledgercore.InitState
+ initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
+ require.NoError(t, err)
+ defer l.Close()
+ accessor := MakeCatchpointCatchupAccessor(l, l.log)
+
+ err = accessor.ResetStagingBalances(context.Background(), true)
+ require.NoError(t, err)
+
+ // load the file from disk.
+ fileContent, err := os.ReadFile(catchpointFilePath)
+ require.NoError(t, err)
+ gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
+ require.NoError(t, err)
+ tarReader := tar.NewReader(gzipReader)
+ var catchupProgress CatchpointCatchupAccessorProgress
+ defer gzipReader.Close()
+ for {
+ header, err := tarReader.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ require.NoError(t, err)
+ break
+ }
+ balancesBlockBytes := make([]byte, header.Size)
+ readComplete := int64(0)
+
+ for readComplete < header.Size {
+ bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
+ readComplete += int64(bytesRead)
+ if err != nil {
+ if err == io.EOF {
+ if readComplete == header.Size {
+ break
+ }
+ require.NoError(t, err)
+ }
+ break
+ }
+ }
+ err = accessor.ProgressStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
+ require.NoError(t, err)
+ }
+
+ err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err := applyCatchpointStagingBalances(ctx, tx, 0, 0)
+ return err
+ })
+ require.NoError(t, err)
+
+ // verify that the account data aligns with what we originally stored:
+ for addr, acct := range accts {
+ acctData, validThrough, _, err := l.LookupLatest(addr)
+ require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr)
+ require.Equal(t, acct, acctData)
+ require.Equal(t, basics.Round(0), validThrough)
+ }
+}
+
+func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ maxResourcesPerChunk := 5
+
+ accts := ledgertesting.RandomAccounts(1, false)
+ // force acct to have overflowing number of resources
+ assetIndex := 1000
+ for addr, acct := range accts {
+ if acct.AssetParams == nil {
+ acct.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 0)
+ accts[addr] = acct
+ }
+ for i := uint64(0); i < 20; i++ {
+ ap := ledgertesting.RandomAssetParams()
+ acct.AssetParams[basics.AssetIndex(assetIndex)] = ap
+ assetIndex++
+ }
+ }
+
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ readDb := ml.trackerDB().Rdb
+
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ expectedTotalAccounts := uint64(1)
+ totalAccountsWritten := uint64(0)
+ totalResources := 0
+ totalChunks := 0
+ var expectedTotalResources int
+ cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources)
+ if err != nil {
+ return err
+ }
+ // repeat until all accounts have been read
+ for totalAccountsWritten < expectedTotalAccounts {
+ cw.balancesChunk.Balances = nil
+ err := cw.readDatabaseStep(cw.ctx, cw.tx)
+ if err != nil {
+ return err
+ }
+ totalAccountsWritten += cw.balancesChunk.numAccounts
+ numResources := 0
+ for _, balance := range cw.balancesChunk.Balances {
+ numResources += len(balance.Resources)
+ }
+ if numResources > maxResourcesPerChunk {
+ return fmt.Errorf("too many resources in this chunk: found %d resources, maximum %d resources", numResources, maxResourcesPerChunk)
+ }
+ totalResources += numResources
+ totalChunks++
+ }
+
+ if totalChunks <= 1 {
+ return fmt.Errorf("expected more than one chunk due to overflow")
+ }
+
+ if expectedTotalResources != totalResources {
+ return fmt.Errorf("total resources did not match: expected %d, actual %d", expectedTotalResources, totalResources)
+ }
+
+ return
+ })
+
+ require.NoError(t, err)
+}
+
+func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ maxResourcesPerChunk := 5
+
+ accts := ledgertesting.RandomAccounts(5, false)
+ // force each acct to have overflowing number of resources
+ assetIndex := 1000
+ for addr, acct := range accts {
+ if acct.AssetParams == nil {
+ acct.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 0)
+ accts[addr] = acct
+ }
+ for i := uint64(0); i < 20; i++ {
+ ap := ledgertesting.RandomAssetParams()
+ acct.AssetParams[basics.AssetIndex(assetIndex)] = ap
+ assetIndex++
+ }
+ }
+
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ readDb := ml.trackerDB().Rdb
+
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ expectedTotalAccounts, err := totalAccounts(ctx, tx)
+ if err != nil {
+ return err
+ }
+ totalAccountsWritten := uint64(0)
+ totalResources := 0
+ var expectedTotalResources int
+ cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources)
+ if err != nil {
+ return err
+ }
+ // repeat until all accounts have been read
+ for totalAccountsWritten < expectedTotalAccounts {
+ cw.balancesChunk.Balances = nil
+ err := cw.readDatabaseStep(cw.ctx, cw.tx)
+ if err != nil {
+ return err
+ }
+ totalAccountsWritten += cw.balancesChunk.numAccounts
+ numResources := 0
+ for _, balance := range cw.balancesChunk.Balances {
+ numResources += len(balance.Resources)
+ }
+ if numResources > maxResourcesPerChunk {
+ return fmt.Errorf("too many resources in this chunk: found %d resources, maximum %d resources", numResources, maxResourcesPerChunk)
+ }
+ totalResources += numResources
+ }
+
+ if expectedTotalResources != totalResources {
+ return fmt.Errorf("total resources did not match: expected %d, actual %d", expectedTotalResources, totalResources)
+ }
+
+ return
+ })
+
+ require.NoError(t, err)
+}
+
+func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
+ readDb := ml.trackerDB().Rdb
+ var totalAccounts uint64
+ var totalChunks uint64
+ var biggestChunkLen uint64
+ var accountsRnd basics.Round
+ var totals ledgercore.AccountTotals
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, 5)
if err != nil {
return err
}
@@ -358,7 +665,7 @@ func TestFullCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// load the file from disk.
- fileContent, err := ioutil.ReadFile(catchpointFilePath)
+ fileContent, err := os.ReadFile(catchpointFilePath)
require.NoError(t, err)
gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
require.NoError(t, err)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index ec05c86af..3c2f6acee 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -86,6 +86,9 @@ type CatchpointCatchupAccessor interface {
// CompleteCatchup completes the catchpoint catchup process by switching the databases tables around
// and reloading the ledger.
CompleteCatchup(ctx context.Context) (err error)
+
+ // Ledger returns a narrow subset of Ledger methods needed by CatchpointCatchupAccessor clients
+ Ledger() (l CatchupAccessorClientLedger)
}
// CatchpointCatchupAccessorImpl is the concrete implementation of the CatchpointCatchupAccessor interface
@@ -94,6 +97,13 @@ type CatchpointCatchupAccessorImpl struct {
// log copied from ledger
log logging.Logger
+
+ acctResCnt catchpointAccountResourceCounter
+
+ // expecting next account to be a specific account
+ expectingSpecificAccount bool
+ // next expected balance account, empty address if not expecting specific account
+ nextExpectedAccount basics.Address
}
// CatchpointCatchupState is the state of the current catchpoint catchup process
@@ -104,8 +114,8 @@ const (
CatchpointCatchupStateInactive = iota
// CatchpointCatchupStateLedgerDownload indicates that we're downloading the ledger
CatchpointCatchupStateLedgerDownload
- // CatchpointCatchupStateLastestBlockDownload indicates that we're download the latest block
- CatchpointCatchupStateLastestBlockDownload
+ // CatchpointCatchupStateLatestBlockDownload indicates that we're downloading the latest block
+ CatchpointCatchupStateLatestBlockDownload
// CatchpointCatchupStateBlocksDownload indicates that we're downloading the blocks prior to the latest one ( total of CatchpointLookback blocks )
CatchpointCatchupStateBlocksDownload
// CatchpointCatchupStateSwitch indicates that we're switching to use the downloaded ledger/blocks content
@@ -115,6 +125,14 @@ const (
catchpointCatchupStateLast = CatchpointCatchupStateSwitch
)
+// CatchupAccessorClientLedger represents the narrow ledger interface needed by catchpoint accessor clients
+type CatchupAccessorClientLedger interface {
+ Block(rnd basics.Round) (blk bookkeeping.Block, err error)
+ GenesisHash() crypto.Digest
+ BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
+ Latest() (rnd basics.Round)
+}
+
// MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger
func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor {
return &CatchpointCatchupAccessorImpl{
@@ -299,6 +317,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
ledgerProcessstagingbalancesCount.Inc(nil)
var normalizedAccountBalances []normalizedAccountBalance
+ var expectingMoreEntries []bool
switch progress.Version {
default:
@@ -317,6 +336,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
normalizedAccountBalances, err = prepareNormalizedBalancesV5(balances.Balances, c.ledger.GenesisProto())
+ expectingMoreEntries = make([]bool, len(balances.Balances))
case CatchpointFileVersionV6:
var balances catchpointFileBalancesChunkV6
@@ -330,12 +350,83 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
normalizedAccountBalances, err = prepareNormalizedBalancesV6(balances.Balances, c.ledger.GenesisProto())
+ expectingMoreEntries = make([]bool, len(balances.Balances))
+ for i, balance := range balances.Balances {
+ expectingMoreEntries[i] = balance.ExpectingMoreEntries
+ }
}
if err != nil {
return fmt.Errorf("processStagingBalances failed to prepare normalized balances : %w", err)
}
+ expectingSpecificAccount := c.expectingSpecificAccount
+ nextExpectedAccount := c.nextExpectedAccount
+
+ // keep track of number of resources processed for each account
+ for i, balance := range normalizedAccountBalances {
+ // if the previous chunk ended mid-account, this chunk must continue with that same account
+ if expectingSpecificAccount && balance.address != nextExpectedAccount {
+ return fmt.Errorf("processStagingBalances received incomplete chunks for account %v", nextExpectedAccount)
+ }
+
+ for _, resData := range balance.resources {
+ if resData.IsApp() && resData.IsOwning() {
+ c.acctResCnt.totalAppParams++
+ }
+ if resData.IsApp() && resData.IsHolding() {
+ c.acctResCnt.totalAppLocalStates++
+ }
+ if resData.IsAsset() && resData.IsOwning() {
+ c.acctResCnt.totalAssetParams++
+ }
+ if resData.IsAsset() && resData.IsHolding() {
+ c.acctResCnt.totalAssets++
+ }
+ }
+ // check that the counted resources add up for this account
+ if !expectingMoreEntries[i] {
+ if c.acctResCnt.totalAppParams != balance.accountData.TotalAppParams {
+ return fmt.Errorf(
+ "processStagingBalances received %d appParams for account %v, expected %d",
+ c.acctResCnt.totalAppParams,
+ balance.address,
+ balance.accountData.TotalAppParams,
+ )
+ }
+ if c.acctResCnt.totalAppLocalStates != balance.accountData.TotalAppLocalStates {
+ return fmt.Errorf(
+ "processStagingBalances received %d appLocalStates for account %v, expected %d",
+ c.acctResCnt.totalAppLocalStates,
+ balance.address,
+ balance.accountData.TotalAppLocalStates,
+ )
+ }
+ if c.acctResCnt.totalAssetParams != balance.accountData.TotalAssetParams {
+ return fmt.Errorf(
+ "processStagingBalances received %d assetParams for account %v, expected %d",
+ c.acctResCnt.totalAssetParams,
+ balance.address,
+ balance.accountData.TotalAssetParams,
+ )
+ }
+ if c.acctResCnt.totalAssets != balance.accountData.TotalAssets {
+ return fmt.Errorf(
+ "processStagingBalances received %d assets for account %v, expected %d",
+ c.acctResCnt.totalAssets,
+ balance.address,
+ balance.accountData.TotalAssets,
+ )
+ }
+ c.acctResCnt = catchpointAccountResourceCounter{}
+ nextExpectedAccount = basics.Address{}
+ expectingSpecificAccount = false
+ } else {
+ nextExpectedAccount = balance.address
+ expectingSpecificAccount = true
+ }
+ }
+
wg := sync.WaitGroup{}
var errBalances error
@@ -349,9 +440,9 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wg.Add(1)
go func() {
defer wg.Done()
- errBalances = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ errBalances = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
start := time.Now()
- err := writeCatchpointStagingBalances(ctx, tx, normalizedAccountBalances)
+ err = writeCatchpointStagingBalances(ctx, tx, normalizedAccountBalances)
durBalances = time.Since(start)
return err
})
@@ -431,6 +522,9 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
// restore "normal" synchronous mode
c.ledger.setSynchronousMode(ctx, c.ledger.synchronousMode)
}
+
+ c.expectingSpecificAccount = expectingSpecificAccount
+ c.nextExpectedAccount = nextExpectedAccount
return err
}
@@ -891,6 +985,11 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
return err
}
+// Ledger returns the ledger instance as a CatchupAccessorClientLedger interface
+func (c *CatchpointCatchupAccessorImpl) Ledger() (l CatchupAccessorClientLedger) {
+ return c.ledger
+}
+
var ledgerResetstagingbalancesCount = metrics.NewCounter("ledger_catchup_resetstagingbalances_count", "calls")
var ledgerResetstagingbalancesMicros = metrics.NewCounter("ledger_catchup_resetstagingbalances_micros", "µs spent")
var ledgerProcessstagingcontentCount = metrics.NewCounter("ledger_catchup_processstagingcontent_count", "calls")
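
The cross-chunk bookkeeping added above can be illustrated in isolation. A minimal sketch, assuming a simplified record that tracks only one resource kind (the real accessor counts app params, app local states, asset params, and asset holdings, and carries the counter on the accessor struct between calls):

    package main

    import "fmt"

    // chunkRecord is a hypothetical stand-in for one normalized balance entry.
    type chunkRecord struct {
        addr        string
        appParams   uint64 // resources carried in this chunk
        declared    uint64 // the account's declared total, checked on its final chunk
        moreEntries bool   // set when the account continues in the next chunk
    }

    // verifier carries the same state the accessor keeps between chunks.
    type verifier struct {
        running   uint64
        expecting bool
        next      string
    }

    func (v *verifier) processChunk(chunk []chunkRecord) error {
        for _, r := range chunk {
            if v.expecting && r.addr != v.next {
                return fmt.Errorf("received incomplete chunks for account %v", v.next)
            }
            v.running += r.appParams
            if r.moreEntries {
                v.expecting, v.next = true, r.addr
                continue
            }
            if v.running != r.declared {
                return fmt.Errorf("account %v: received %d appParams, expected %d", r.addr, v.running, r.declared)
            }
            v.running, v.expecting, v.next = 0, false, ""
        }
        return nil
    }

    func main() {
        v := &verifier{}
        // account A spans two chunks; its declared total is only checked on the second
        fmt.Println(v.processChunk([]chunkRecord{{addr: "A", appParams: 2, declared: 3, moreEntries: true}}))
        fmt.Println(v.processChunk([]chunkRecord{{addr: "A", appParams: 1, declared: 3}}))
    }
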
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 88738ee66..50a8d9b57 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -59,7 +59,10 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
accountData := baseAccountData{}
accountData.MicroAlgos.Raw = crypto.RandUint63()
randomAccount.AccountData = protocol.Encode(&accountData)
- crypto.RandBytes(randomAccount.Address[:])
+ // have the first account be the zero address
+ if i > 0 {
+ crypto.RandBytes(randomAccount.Address[:])
+ }
binary.LittleEndian.PutUint64(randomAccount.Address[:], accounts+i)
balances.Balances[i] = randomAccount
}
@@ -163,7 +166,7 @@ func TestCatchupAccessorFoo(t *testing.T) {
require.NoError(t, err, "catchpointAccessor.SetState")
err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateLedgerDownload)
require.NoError(t, err, "catchpointAccessor.SetState")
- err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateLastestBlockDownload)
+ err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateLatestBlockDownload)
require.NoError(t, err, "catchpointAccessor.SetState")
err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateBlocksDownload)
require.NoError(t, err, "catchpointAccessor.SetState")
@@ -369,3 +372,53 @@ func TestVerifyCatchpoint(t *testing.T) {
require.Error(t, err)
//require.NoError(t, err)
}
+
+func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // setup boilerplate
+ log := logging.TestingLog(t)
+ dbBaseFileName := t.Name()
+ const inMem = true
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ cfg := config.GetDefaultLocal()
+ l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
+ require.NoError(t, err, "could not open ledger")
+ defer func() {
+ l.Close()
+ }()
+ catchpointAccessor := MakeCatchpointCatchupAccessor(l, log)
+ var progress CatchpointCatchupAccessorProgress
+ ctx := context.Background()
+
+ // content.msgpack from this:
+ fileHeader := CatchpointFileHeader{
+ Version: CatchpointFileVersionV6,
+ BalancesRound: basics.Round(0),
+ BlocksRound: basics.Round(0),
+ Totals: ledgercore.AccountTotals{},
+ TotalAccounts: 1,
+ TotalChunks: 1,
+ Catchpoint: "",
+ BlockHeaderDigest: crypto.Digest{},
+ }
+ encodedFileHeader := protocol.Encode(&fileHeader)
+ err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ require.NoError(t, err)
+
+ var balances catchpointFileBalancesChunkV6
+ balances.Balances = make([]encodedBalanceRecordV6, 1)
+ var randomAccount encodedBalanceRecordV6
+ accountData := baseAccountData{}
+ accountData.MicroAlgos.Raw = crypto.RandUint63()
+ accountData.TotalAppParams = 1
+ randomAccount.AccountData = protocol.Encode(&accountData)
+ crypto.RandBytes(randomAccount.Address[:])
+ binary.LittleEndian.PutUint64(randomAccount.Address[:], 0)
+ balances.Balances[0] = randomAccount
+ encodedAccounts := protocol.Encode(&balances)
+
+ // expect error since there is a resource count mismatch
+ err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ require.Error(t, err)
+}
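
For contrast with the failing chunk above, a split that satisfies the counter would mark the first chunk as unfinished and finish the account in the next one. A rough sketch against the same internal types (the Resources payload and its helper are purely illustrative assumptions):

    // chunk 1: the account declares TotalAppParams = 1 but carries no resources yet
    rec := encodedBalanceRecordV6{Address: randomAccount.Address, AccountData: protocol.Encode(&accountData)}
    rec.ExpectingMoreEntries = true
    chunk1 := catchpointFileBalancesChunkV6{Balances: []encodedBalanceRecordV6{rec}}

    // chunk 2: the same address again, now carrying its single app-params resource,
    // with ExpectingMoreEntries left false so the totals are checked and match
    rec2 := rec
    rec2.ExpectingMoreEntries = false
    rec2.Resources = resourcesWithOneAppParams // hypothetical pre-encoded resource map
    chunk2 := catchpointFileBalancesChunkV6{Balances: []encodedBalanceRecordV6{rec2}}
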
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
index 265579cf3..fcbfef681 100644
--- a/ledger/evalbench_test.go
+++ b/ledger/evalbench_test.go
@@ -509,7 +509,7 @@ func benchmarkPreparePaymentTransactionsTesting(b *testing.B, numTxns int, txnSo
require.NoError(b, err)
genHash := l.GenesisHash()
- // apply initialization transations if any
+ // apply initialization transactions if any
initSignedTxns, maxTxnPerBlock := txnSource.Prepare(b, addrs, keys, newBlock.Round(), genHash)
if len(initSignedTxns) > 0 {
diff --git a/ledger/fullblock_perf_test.go b/ledger/fullblock_perf_test.go
new file mode 100644
index 000000000..d2d50cd8f
--- /dev/null
+++ b/ledger/fullblock_perf_test.go
@@ -0,0 +1,638 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ mrand "math/rand"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+type benchConfig struct {
+ txnCount uint64
+ round uint64
+ b *testing.B
+ creator basics.Address
+ accts []basics.Address
+ acctToAst map[basics.Address]map[basics.AssetIndex]uint64
+ acctToApp map[basics.Address]map[basics.AppIndex]struct{}
+ l0 *Ledger
+ l1 *Ledger
+ eval *internal.BlockEvaluator
+ numPay uint64
+ numAst uint64
+ numApp uint64
+ blocks []bookkeeping.Block
+}
+
+func setupEnv(b *testing.B, numAccts int) (bc *benchConfig) {
+ dbTempDir := b.TempDir()
+ name := b.Name()
+ dbName := fmt.Sprintf("%s.%d", name, crypto.RandUint64())
+ dbPrefix := filepath.Join(dbTempDir, dbName)
+
+ genesisInitState := getInitState()
+
+ // Use future protocol
+ genesisInitState.Block.BlockHeader.GenesisHash = crypto.Digest{}
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusFuture
+ genesisInitState.GenesisHash = crypto.Digest{1}
+ genesisInitState.Block.BlockHeader.GenesisHash = crypto.Digest{1}
+
+ // maintain a map from accounts to a map of assets and apps
+ acctToAst := make(map[basics.Address]map[basics.AssetIndex]uint64)
+ acctToApp := make(map[basics.Address]map[basics.AppIndex]struct{})
+ accts := make([]basics.Address, 0, numAccts)
+ // creator is the special rich account
+ creator := basics.Address{}
+ _, err := rand.Read(creator[:])
+ require.NoError(b, err)
+ genesisInitState.Accounts[creator] = basics.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1234567890000000000})
+
+ logger := logging.TestingLog(b)
+ logger.SetLevel(logging.Warn)
+
+ // open 2 ledgers: 1st for preparing the blocks, 2nd for measuring the time
+ inMem := false
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ cfg.MaxAcctLookback = uint64(b.N) // prevent committing blocks into DB since we benchmark validation
+ l0, err := OpenLedger(logger, dbPrefix, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+
+ // open second ledger
+ inMem = false
+ cfg.Archival = false
+ cfg.MaxAcctLookback = uint64(b.N) // prevent committing blocks into DB since we benchmark validation
+ dbName = fmt.Sprintf("%s.%d.2", name, crypto.RandUint64())
+ dbPrefix = filepath.Join(dbTempDir, dbName)
+ l1, err := OpenLedger(logger, dbPrefix, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+
+ // init the first block
+ blk := genesisInitState.Block
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.BlockHeader.GenesisID = fmt.Sprintf("%s-genesis", b.Name())
+ cert := agreement.Certificate{}
+
+ err = l0.AddBlock(blk, cert)
+ require.NoError(b, err)
+ err = l1.AddBlock(blk, cert)
+ require.NoError(b, err)
+
+ newBlk := bookkeeping.MakeBlock(blk.BlockHeader)
+ eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ require.NoError(b, err)
+
+ bc = &benchConfig{
+ txnCount: 0,
+ round: 1,
+ b: b,
+ creator: creator,
+ accts: accts,
+ acctToAst: acctToAst,
+ acctToApp: acctToApp,
+ l0: l0,
+ l1: l1,
+ eval: eval,
+ }
+
+ // start the ledger with a pool of accounts
+ for i := 0; i < numAccts; i++ {
+ acct := addNewAccount(bc)
+ payTo(bc, bc.creator, acct, 1234567890000)
+ }
+
+ addBlock(bc)
+ vc := verify.GetMockedCache(true)
+ for _, blk := range bc.blocks {
+ _, err := internal.Eval(context.Background(), bc.l1, blk, true, vc, nil)
+ require.NoError(b, err)
+ err = bc.l1.AddBlock(blk, cert)
+ require.NoError(b, err)
+ }
+ bc.blocks = bc.blocks[len(bc.blocks):]
+ bc.txnCount = 0
+ bc.round = 0
+ bc.numPay = 0
+ return bc
+}
+
+func sendAssetEvent(bc *benchConfig, newAccount bool) {
+
+ // pick a random account
+ randAcct1 := bc.accts[mrand.Intn(len(bc.accts))]
+ randAcct2 := bc.accts[mrand.Intn(len(bc.accts))]
+ if newAccount {
+ randAcct2 = addNewAccount(bc)
+ payTo(bc, bc.creator, randAcct2, 100000000)
+ }
+
+ var assIdx basics.AssetIndex
+ for key, val := range bc.acctToAst[randAcct1] {
+ if val > 1 {
+ assIdx = key
+ break
+ }
+ }
+
+ if assIdx == 0 {
+ assIdx = createAssetForAcct(bc, randAcct1)
+ }
+
+ // opt in to the asset
+ if _, have := bc.acctToAst[randAcct2][assIdx]; !have {
+ sendAssetTo(bc, randAcct2, randAcct2, assIdx, 0)
+ }
+ sendAssetTo(bc, randAcct1, randAcct2, assIdx, 1)
+}
+
+func appCallEvent(bc *benchConfig, newAccount bool) {
+
+ // pick a random account
+ randAcct1 := bc.accts[mrand.Intn(len(bc.accts))]
+ randAcct2 := bc.accts[mrand.Intn(len(bc.accts))]
+ if newAccount {
+ randAcct2 = addNewAccount(bc)
+ payTo(bc, bc.creator, randAcct2, 100000000)
+ }
+
+ var appIdx basics.AppIndex
+ if len(bc.acctToApp) > 0 {
+ randApp := mrand.Intn(len(bc.acctToApp))
+ a := 0
+ for key := range bc.acctToApp[randAcct1] {
+ if a == randApp {
+ appIdx = key
+ break
+ }
+ a++
+ }
+ }
+
+ if appIdx == 0 {
+ appIdx = createAppForAcct(bc, randAcct1)
+ }
+
+ // opt in to the app
+ if _, have := bc.acctToApp[randAcct2][appIdx]; !have {
+ optInApp(bc, randAcct2, appIdx)
+ }
+ callApp(bc, randAcct2, appIdx)
+}
+
+func payEvent(bc *benchConfig, newAccount bool) {
+ // pick a random account
+ randAcct1 := bc.accts[mrand.Intn(len(bc.accts))]
+ randAcct2 := bc.accts[mrand.Intn(len(bc.accts))]
+ if newAccount {
+ randAcct2 = addNewAccount(bc)
+ payTo(bc, bc.creator, randAcct2, 100000000)
+ } else {
+ payTo(bc, randAcct1, randAcct2, 10)
+ }
+}
+
+func sendAssetTo(bc *benchConfig, from, to basics.Address, assIdx basics.AssetIndex, amt uint64) {
+ tx := sendAssetTransaction(bc.txnCount, bc.round, from, to, assIdx, amt)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numAst++
+}
+
+func payTo(bc *benchConfig, from, to basics.Address, amt uint64) {
+ tx := createPaymentTransaction(bc.txnCount, bc.round, from, to, amt)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numPay++
+}
+
+func createAssetForAcct(bc *benchConfig, acct basics.Address) (aidx basics.AssetIndex) {
+ tx := createAssetTransaction(bc.txnCount, bc.round, acct)
+ stxn := transactions.SignedTxn{Txn: tx, Sig: crypto.Signature{1}}
+ aIdx := basics.AssetIndex(addTransaction(bc, stxn))
+ if len(bc.acctToAst[acct]) == 0 {
+ bc.acctToAst[acct] = make(map[basics.AssetIndex]uint64)
+ }
+ bc.acctToAst[acct][aIdx] = 3000000
+ bc.numAst++
+ return aIdx
+}
+
+func createAppForAcct(bc *benchConfig, acct basics.Address) (appIdx basics.AppIndex) {
+ tx, err := makeAppTransaction(bc.txnCount, bc.round, acct)
+ require.NoError(bc.b, err)
+ stxn := transactions.SignedTxn{Txn: tx, Sig: crypto.Signature{1}}
+ appIdx = basics.AppIndex(addTransaction(bc, stxn))
+ if len(bc.acctToApp[acct]) == 0 {
+ bc.acctToApp[acct] = make(map[basics.AppIndex]struct{})
+ }
+ bc.acctToApp[acct][appIdx] = struct{}{}
+ bc.numApp++
+ return appIdx
+}
+
+func optInApp(bc *benchConfig, acct basics.Address, appIdx basics.AppIndex) {
+ tx := makeOptInAppTransaction(bc.txnCount, appIdx, bc.round, acct)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numApp++
+}
+
+func callApp(bc *benchConfig, acct basics.Address, appIdx basics.AppIndex) {
+ tx := callAppTransaction(bc.txnCount, appIdx, bc.round, acct)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numApp++
+}
+
+func addNewAccount(bc *benchConfig) (acct basics.Address) {
+
+ acct = basics.Address{}
+ _, err := rand.Read(acct[:])
+ require.NoError(bc.b, err)
+ bc.accts = append(bc.accts, acct)
+ return acct
+}
+
+func addTransaction(bc *benchConfig, stxn transactions.SignedTxn) uint64 {
+ err := bc.eval.Transaction(stxn, transactions.ApplyData{})
+ if err == ledgercore.ErrNoSpace {
+ addBlock(bc)
+ addTransaction(bc, stxn)
+ } else {
+ require.NoError(bc.b, err)
+ bc.txnCount++
+ }
+ return bc.eval.TestingTxnCounter()
+}
+
+func addBlock(bc *benchConfig) {
+ vblk, err := bc.eval.GenerateBlock()
+ cert := agreement.Certificate{}
+ require.NoError(bc.b, err)
+ bc.blocks = append(bc.blocks, vblk.Block())
+
+ err = bc.l0.AddBlock(vblk.Block(), cert)
+ require.NoError(bc.b, err)
+
+ _, last := bc.l0.LatestCommitted()
+ prev, err := bc.l0.BlockHdr(basics.Round(last))
+ require.NoError(bc.b, err)
+ newBlk := bookkeeping.MakeBlock(prev)
+ bc.eval, err = bc.l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ bc.round++
+ require.NoError(bc.b, err)
+}
+
+// BenchmarkBlockValidationJustPayNoNew sends payment transactions between existing accounts,
+// by choosing a pair of random accounts.
+func BenchmarkBlockValidationJustPayNoNew(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.0
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 1.0
+ astProb := 0.0
+ //appsProb := 0
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+// BenchmarkBlockValidationJustPay sends payments between two random accounts, with
+// 50% probability of creating a new account.
+func BenchmarkBlockValidationJustPay(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.5
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 1.0
+ astProb := 0.0
+ //appsProb := 0
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+// BenchmarkBlockValidationNoNew executes payment, asset or application events with
+// 30%, 50%, and 20% probability respectively among existing accounts.
+// Note that each event may involve multiple transactions (e.g. opt in to asset,
+// create app, opt in to app).
+func BenchmarkBlockValidationNoNew(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.0
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 0.3
+ astProb := 0.5
+ //appsProb := 0.2
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+// BenchmarkBlockValidationMix executes payment, asset or application events with
+// 30%, 50%, and 20% probability respectively among existing or new accounts.
+// Note that each event may involve multiple transactions (e.g. funding new account,
+// opt in to asset, create app, opt in to app).
+func BenchmarkBlockValidationMix(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.5
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 0.3
+ astProb := 0.5
+ //appsProb := 0.2
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+func benchmarkBlockValidationMix(b *testing.B, newAcctProb, payProb, astProb float64, numAccts int) {
+ bc := setupEnv(b, numAccts)
+
+ numBlocks := uint64(b.N)
+ cert := agreement.Certificate{}
+ fmt.Printf("Preparing... /%d: ", numBlocks)
+ s3 := time.Now()
+
+ for bc.round < numBlocks {
+ currentRound := bc.round
+ for bc.round == currentRound {
+ randNum := mrand.Float64()
+ if randNum < payProb {
+ // add pay transaction
+ payEvent(bc, mrand.Float64() < newAcctProb)
+ } else if randNum < payProb+astProb {
+ // add asset transactions
+ sendAssetEvent(bc, mrand.Float64() < newAcctProb)
+ } else {
+ // add app transaction
+ appCallEvent(bc, mrand.Float64() < newAcctProb)
+ }
+ }
+ if (currentRound+1)*10%(2*numBlocks) == 0 {
+ fmt.Printf("%d%% %.1fs ", (currentRound+1)*100/numBlocks, time.Since(s3).Seconds())
+ s3 = time.Now()
+ }
+
+ }
+ fmt.Printf("\nSummary %d blocks and %d txns: pay %d/blk (%d%%) assets %d/blk (%d%%) apps %d/blk (%d%%)\n",
+ numBlocks, bc.txnCount, bc.numPay/numBlocks, bc.numPay*100/bc.txnCount, bc.numAst/numBlocks, bc.numAst*100/bc.txnCount, bc.numApp/numBlocks, bc.numApp*100/bc.txnCount)
+
+ // eval + add all the (valid) blocks to the second ledger, measuring it this time
+ vc := verify.GetMockedCache(true)
+ tt := time.Now()
+ b.ResetTimer()
+ for _, blk := range bc.blocks {
+ _, err := internal.Eval(context.Background(), bc.l1, blk, true, vc, nil)
+ require.NoError(b, err)
+ err = bc.l1.AddBlock(blk, cert)
+ require.NoError(b, err)
+ }
+ fmt.Printf("%.1f sec / %d blks\n", time.Since(tt).Seconds(), numBlocks)
+}
+
+func createPaymentTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address,
+ receiver basics.Address,
+ amount uint64) (txn transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+ txn = transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: receiver,
+ Amount: basics.MicroAlgos{Raw: amount},
+ },
+ }
+ return
+}
+
+// prepares a create asset transaction
+func createAssetTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address) (assetTx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+ assetTx = transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ AssetParams: basics.AssetParams{
+ Total: 3000000,
+ DefaultFrozen: false,
+ Manager: sender,
+ },
+ },
+ }
+ return
+}
+
+// prepares a send asset transaction
+func sendAssetTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address,
+ receiver basics.Address,
+ assetID basics.AssetIndex,
+ amt uint64) (tx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+ tx = transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: assetID,
+ AssetAmount: amt,
+ AssetReceiver: receiver,
+ },
+ }
+ return
+}
+
+func makeAppTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address) (appTx transactions.Transaction, err error) {
+
+ progCounter := counter
+ prog := fmt.Sprintf(`#pragma version 2
+// a simple global and local calls counter app
+byte b64 Y291bnRlcg== // counter
+dup
+app_global_get
+int %d
++
+app_global_put // update the counter
+int 0
+int 0
+app_opted_in
+bnz opted_in
+err
+opted_in:
+int 0 // account idx for app_local_put
+byte b64 Y291bnRlcg== // counter
+int 0
+byte b64 Y291bnRlcg==
+app_local_get
+int 1 // increment
++
+app_local_put
+int 1
+`, progCounter)
+
+ approvalOps, err := logic.AssembleString(prog)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+ clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+ schema := basics.StateSchema{
+ NumUint: 1,
+ }
+
+ // create the app
+ appTx = transactions.Transaction{}
+ appTx.Type = protocol.ApplicationCallTx
+ appTx.OnCompletion = transactions.OptInOC
+ appTx.ApprovalProgram = approvalOps.Program
+ appTx.ClearStateProgram = clearstateOps.Program
+ appTx.GlobalStateSchema = schema
+ appTx.LocalStateSchema = schema
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+
+ appTx.Header = transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ }
+ appTx.Type = protocol.ApplicationCallTx
+ return
+}
+
+// prepares an opt-in app transaction
+func makeOptInAppTransaction(
+ counter uint64,
+ appIdx basics.AppIndex,
+ round uint64,
+ sender basics.Address) (appTx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+
+ appTx = transactions.Transaction{}
+ appTx.ApplicationID = appIdx
+ appTx.OnCompletion = transactions.OptInOC
+
+ appTx.Header = transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ }
+ appTx.Type = protocol.ApplicationCallTx
+ return
+}
+
+// prepares an app call transaction
+func callAppTransaction(
+ counter uint64,
+ appIdx basics.AppIndex,
+ round uint64,
+ sender basics.Address) (appTx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+
+ appTx = transactions.Transaction{}
+ appTx.ApplicationID = appIdx
+ appTx.OnCompletion = transactions.NoOpOC
+
+ appTx.Header = transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ }
+ appTx.Type = protocol.ApplicationCallTx
+ return
+}
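
These benchmarks use the standard Go testing harness, so a single mix can be run with, for example, `go test ./ledger -run NONE -bench BenchmarkBlockValidationJustPayNoNew -benchtime 10x` (the package path is assumed from this diff). Note that b.N controls both the number of prepared blocks and MaxAcctLookback in setupEnv, so a fixed `-benchtime Nx` keeps the preparation phase and its memory footprint predictable.
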
diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go
index 962e1419c..86a4a7fea 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/internal/apptxn_test.go
@@ -2479,7 +2479,7 @@ func TestInnerClearState(t *testing.T) {
eval := nextBlock(t, l)
txn(t, l, eval, &inner)
vb := endBlock(t, l, eval)
- innerId := vb.Block().Payset[0].ApplicationID
+ innerID := vb.Block().Payset[0].ApplicationID
// Outer is a simple app that will invoke the given app (in ForeignApps[0])
// with the given OnCompletion (in ApplicationArgs[0]). Goal is to use it
@@ -2498,33 +2498,33 @@ itxn_begin
itxn_field OnCompletion
itxn_submit
`),
- ForeignApps: []basics.AppIndex{innerId},
+ ForeignApps: []basics.AppIndex{innerID},
}
eval = nextBlock(t, l)
txn(t, l, eval, &outer)
vb = endBlock(t, l, eval)
- outerId := vb.Block().Payset[0].ApplicationID
+ outerID := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: outerId.Address(),
+ Receiver: outerID.Address(),
Amount: 1_000_000,
}
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: outerId,
+ ApplicationID: outerID,
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}},
- ForeignApps: []basics.AppIndex{innerId},
+ ForeignApps: []basics.AppIndex{innerID},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
- outerAcct := lookup(t, l, outerId.Address())
+ outerAcct := lookup(t, l, outerID.Address())
require.Len(t, outerAcct.AppLocalStates, 1)
require.Equal(t, outerAcct.TotalAppSchema, basics.StateSchema{
NumUint: 2,
@@ -2536,7 +2536,7 @@ itxn_submit
txn(t, l, eval, &call)
endBlock(t, l, eval)
- outerAcct = lookup(t, l, outerId.Address())
+ outerAcct = lookup(t, l, outerID.Address())
require.Empty(t, outerAcct.AppLocalStates)
require.Empty(t, outerAcct.TotalAppSchema)
@@ -2567,7 +2567,7 @@ b top
eval := nextBlock(t, l)
txn(t, l, eval, &badCallee)
vb := endBlock(t, l, eval)
- badId := vb.Block().Payset[0].ApplicationID
+ badID := vb.Block().Payset[0].ApplicationID
// Outer is a simple app that will invoke the given app (in ForeignApps[0])
// with the given OnCompletion (in ApplicationArgs[0]). Goal is to use it
@@ -2603,33 +2603,33 @@ bnz skip // Don't do budget checking during optin
assert
skip:
`),
- ForeignApps: []basics.AppIndex{badId},
+ ForeignApps: []basics.AppIndex{badID},
}
eval = nextBlock(t, l)
txn(t, l, eval, &outer)
vb = endBlock(t, l, eval)
- outerId := vb.Block().Payset[0].ApplicationID
+ outerID := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: outerId.Address(),
+ Receiver: outerID.Address(),
Amount: 1_000_000,
}
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: outerId,
+ ApplicationID: outerID,
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}},
- ForeignApps: []basics.AppIndex{badId},
+ ForeignApps: []basics.AppIndex{badID},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
- outerAcct := lookup(t, l, outerId.Address())
+ outerAcct := lookup(t, l, outerID.Address())
require.Len(t, outerAcct.AppLocalStates, 1)
// When doing a clear state, `call` checks that budget wasn't stolen
@@ -2639,7 +2639,7 @@ skip:
endBlock(t, l, eval)
// Clearstate took effect, despite failure from infinite loop
- outerAcct = lookup(t, l, outerId.Address())
+ outerAcct = lookup(t, l, outerID.Address())
require.Empty(t, outerAcct.AppLocalStates)
}
@@ -2697,8 +2697,8 @@ log
eval := nextBlock(t, l)
txns(t, l, eval, &inner, &waster)
vb := endBlock(t, l, eval)
- innerId := vb.Block().Payset[0].ApplicationID
- wasterId := vb.Block().Payset[1].ApplicationID
+ innerID := vb.Block().Payset[0].ApplicationID
+ wasterID := vb.Block().Payset[1].ApplicationID
// Grouper is a simple app that will invoke the given apps (in
// ForeignApps[0,1]) as a group, with the given OnCompletion (in
@@ -2730,27 +2730,27 @@ itxn_submit
eval = nextBlock(t, l)
txn(t, l, eval, &grouper)
vb = endBlock(t, l, eval)
- grouperId := vb.Block().Payset[0].ApplicationID
+ grouperID := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: grouperId.Address(),
+ Receiver: grouperID.Address(),
Amount: 1_000_000,
}
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: grouperId,
+ ApplicationID: grouperID,
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}, {byte(transactions.OptInOC)}},
- ForeignApps: []basics.AppIndex{wasterId, innerId},
+ ForeignApps: []basics.AppIndex{wasterID, innerID},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
- gAcct := lookup(t, l, grouperId.Address())
+ gAcct := lookup(t, l, grouperID.Address())
require.Len(t, gAcct.AppLocalStates, 2)
call.ApplicationArgs = [][]byte{{byte(transactions.CloseOutOC)}, {byte(transactions.ClearStateOC)}}
@@ -2760,7 +2760,7 @@ itxn_submit
require.Len(t, vb.Block().Payset, 0)
// Clearstate did not take effect, since the caller tried to shortchange the CSP
- gAcct = lookup(t, l, grouperId.Address())
+ gAcct = lookup(t, l, grouperID.Address())
require.Len(t, gAcct.AppLocalStates, 2)
}
@@ -3310,14 +3310,9 @@ func TestReloadWithTxns(t *testing.T) {
dl.fullBlock() // So that the `block` opcode has a block to inspect
lookHdr := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: `
- txn FirstValid
- int 1
- -
- block BlkTimestamp
-`,
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "txn FirstValid; int 1; -; block BlkTimestamp",
}
dl.fullBlock(&lookHdr)
diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go
index 1944c4a69..f2750d8a0 100644
--- a/ledger/internal/eval.go
+++ b/ledger/internal/eval.go
@@ -1476,7 +1476,7 @@ func (validator *evalTxValidator) run() {
unverifiedTxnGroups = append(unverifiedTxnGroups, signedTxnGroup)
}
- unverifiedTxnGroups = validator.txcache.GetUnverifiedTranscationGroups(unverifiedTxnGroups, specialAddresses, validator.block.BlockHeader.CurrentProtocol)
+ unverifiedTxnGroups = validator.txcache.GetUnverifiedTransactionGroups(unverifiedTxnGroups, specialAddresses, validator.block.BlockHeader.CurrentProtocol)
err := verify.PaysetGroups(validator.ctx, unverifiedTxnGroups, validator.block.BlockHeader, validator.verificationPool, validator.txcache, validator.ledger)
if err != nil {
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index a34dd2f31..b6eb6b9c1 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -533,6 +533,16 @@ func endBlock(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator
require.NoError(t, err)
err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
require.NoError(t, err)
+ // `rndBQ` gives the latest known block round added to the ledger.
+ // We wait until block `rndBQ` is committed to the blockQueue to avoid
+ // the data race noted in
+ // https://github.com/algorand/go-algorand/issues/4349,
+ // where writing to `callTxnGroup` after `dl.fullBlock` raced with the
+ // underlying async goroutine `go bq.syncer()` reading `callTxnGroup`.
+ // Waiting until all newly added blocks are committed before returning
+ // avoids that race.
+ rndBQ := ledger.Latest()
+ ledger.WaitForCommit(rndBQ)
return validatedBlock
}
@@ -1037,13 +1047,13 @@ func TestLogsInBlock(t *testing.T) {
}
vb := dl.fullBlock(&createTxn)
createInBlock := vb.Block().Payset[0]
- appId := createInBlock.ApplyData.ApplicationID
+ appID := createInBlock.ApplyData.ApplicationID
require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
optInTxn := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[1],
- ApplicationID: appId,
+ ApplicationID: appID,
OnCompletion: transactions.OptInOC,
}
vb = dl.fullBlock(&optInTxn)
@@ -1053,7 +1063,7 @@ func TestLogsInBlock(t *testing.T) {
clearTxn := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[1],
- ApplicationID: appId,
+ ApplicationID: appID,
OnCompletion: transactions.ClearStateOC,
}
vb = dl.fullBlock(&clearTxn)
@@ -1092,7 +1102,7 @@ func TestUnfundedSenders(t *testing.T) {
ghost := basics.Address{0x01}
- asa_create := txntest.Txn{
+ asaCreate := txntest.Txn{
Type: "acfg",
Sender: addrs[0],
AssetParams: basics.AssetParams{
@@ -1103,12 +1113,12 @@ func TestUnfundedSenders(t *testing.T) {
},
}
- app_create := txntest.Txn{
+ appCreate := txntest.Txn{
Type: "appl",
Sender: addrs[0],
}
- dl.fullBlock(&asa_create, &app_create)
+ dl.fullBlock(&asaCreate, &appCreate)
// Advance so that rewardsLevel increases
for i := 1; i < 10; i++ {
@@ -1220,7 +1230,7 @@ func TestAppCallAppDuringInit(t *testing.T) {
dl.fullBlock()
}
- call_in_init := txntest.Txn{
+ callInInit := txntest.Txn{
Type: "appl",
Sender: addrs[0],
ApprovalProgram: `
@@ -1241,6 +1251,6 @@ func TestAppCallAppDuringInit(t *testing.T) {
// In the old days, balances.Move would try to increase the rewardsState on the unfunded account
problem = "balance 0 below min"
}
- dl.txn(&call_in_init, problem)
+ dl.txn(&callInInit, problem)
})
}
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 469f681b9..95a0a84e2 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -1388,6 +1388,7 @@ func TestLedgerBlockHdrCaching(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
@@ -1484,6 +1485,7 @@ func TestLedgerReload(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -1682,11 +1684,13 @@ func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = false
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
lastBlock, err := l.Block(l.Latest())
+ require.NoError(t, err)
proto := config.Consensus[lastBlock.CurrentProtocol]
accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts)+maxBlocks)
keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys)+maxBlocks)
@@ -2849,6 +2853,7 @@ func TestVotersReloadFromDisk(t *testing.T) {
cfg.Archival = false
cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -2896,6 +2901,7 @@ func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) {
cfg.Archival = false
cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -2955,6 +2961,7 @@ func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) {
cfg.Archival = false
cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index 58824b2ae..d76a3a0db 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -1768,7 +1768,7 @@ func (z *catchpointFileBalancesChunkV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
zb0002Len := uint32(1)
- var zb0002Mask uint8 /* 2 bits */
+ var zb0002Mask uint8 /* 3 bits */
if len((*z).Balances) == 0 {
zb0002Len--
zb0002Mask |= 0x2
@@ -2298,8 +2298,8 @@ func (z *encodedBalanceRecordV5) MsgIsZero() bool {
func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(3)
- var zb0003Mask uint8 /* 4 bits */
+ zb0003Len := uint32(4)
+ var zb0003Mask uint8 /* 5 bits */
if (*z).Address.MsgIsZero() {
zb0003Len--
zb0003Mask |= 0x2
@@ -2312,6 +2312,10 @@ func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
zb0003Len--
zb0003Mask |= 0x8
}
+ if (*z).ExpectingMoreEntries == false {
+ zb0003Len--
+ zb0003Mask |= 0x10
+ }
// variable map header, size zb0003Len
o = append(o, 0x80|uint8(zb0003Len))
if zb0003Len != 0 {
@@ -2345,6 +2349,11 @@ func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
+ if (zb0003Mask & 0x10) == 0 { // if not empty
+ // string "e"
+ o = append(o, 0xa1, 0x65)
+ o = msgp.AppendBool(o, (*z).ExpectingMoreEntries)
+ }
}
return
}
@@ -2420,6 +2429,14 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
}
}
if zb0003 > 0 {
+ zb0003--
+ (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpectingMoreEntries")
+ return
+ }
+ }
+ if zb0003 > 0 {
err = msgp.ErrTooManyArrayFields(zb0003)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -2488,6 +2505,12 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
}
(*z).Resources[zb0001] = zb0002
}
+ case "e":
+ (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpectingMoreEntries")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -2516,12 +2539,13 @@ func (z *encodedBalanceRecordV6) Msgsize() (s int) {
s += 0 + msgp.Uint64Size + zb0002.Msgsize()
}
}
+ s += 2 + msgp.BoolSize
return
}
// MsgIsZero returns whether this is a zero value
func (z *encodedBalanceRecordV6) MsgIsZero() bool {
- return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0)
+ return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0) && ((*z).ExpectingMoreEntries == false)
}
// MarshalMsg implements msgp.Marshaler
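
Because the generated marshaller above only emits the "e" key when ExpectingMoreEntries is true, records written before this change decode unchanged, while the default case shown above (msgp.ErrNoField) means old decoders reject records that do carry the new key. A quick round-trip sketch (protocol.Encode appears elsewhere in this diff; the matching protocol.Decode helper is assumed):

    rec := encodedBalanceRecordV6{ExpectingMoreEntries: true}
    blob := protocol.Encode(&rec) // the encoded map now carries "e": true
    var out encodedBalanceRecordV6
    if err := protocol.Decode(blob, &out); err != nil {
        panic(err)
    }
    // out.ExpectingMoreEntries == true; with the flag false, "e" is omitted entirely
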
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 0f20579a8..cea52d434 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -58,7 +58,7 @@ func RandomAccountData(rewardsBase uint64) basics.AccountData {
switch crypto.RandUint64() % 3 {
case 0:
data.Status = basics.Online
- data.VoteLastValid = 1000
+ data.VoteLastValid = 10000
case 1:
data.Status = basics.Offline
data.VoteLastValid = 0
diff --git a/ledger/voters_test.go b/ledger/voters_test.go
index b13b11d97..78e9eb7b8 100644
--- a/ledger/voters_test.go
+++ b/ledger/voters_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -30,12 +31,46 @@ import (
"github.com/stretchr/testify/require"
)
-func addBlockToAccountsUpdate(blk bookkeeping.Block, ao *onlineAccounts) {
+func addBlockToAccountsUpdate(blk bookkeeping.Block, ao *onlineAccounts, totals ledgercore.AccountTotals) {
updates := ledgercore.MakeAccountDeltas(1)
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ delta.Totals = totals
ao.newBlock(blk, delta)
}
+func checkVoters(a *require.Assertions, ao *onlineAccounts, expectedSize uint64) {
+ a.Equal(expectedSize, uint64(len(ao.voters.votersForRoundCache)))
+ for _, v := range ao.voters.votersForRoundCache {
+ err := v.Wait()
+ a.NoError(err)
+ a.NotZero(v.TotalWeight)
+ a.NotZero(len(v.Participants))
+ a.NotZero(v.Tree.NumOfElements)
+ }
+}
+
+func makeRandomOnlineAccounts(numberOfAccounts uint64) map[basics.Address]basics.AccountData {
+ res := make(map[basics.Address]basics.AccountData)
+
+ for i := uint64(0); i < numberOfAccounts; i++ {
+ var data basics.AccountData
+
+ // Avoid overflowing totals
+ data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
+
+ data.Status = basics.Online
+ data.VoteLastValid = 10000000
+
+ data.VoteFirstValid = 0
+ data.RewardsBase = 0
+
+ res[ledgertesting.RandomAddress()] = data
+ }
+
+ return res
+}
+
func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -44,7 +79,7 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
numOfIntervals := config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals - 1
lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -64,15 +99,18 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
defer au.close()
defer ao.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
i := uint64(1)
// adding blocks to the voterstracker (in order to pass the numOfIntervals*stateproofInterval we add 1)
for ; i < (numOfIntervals*intervalForTest)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(numOfIntervals, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, numOfIntervals)
a.Equal(basics.Round(intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
block := randomBlock(basics.Round(i))
@@ -84,13 +122,13 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
stateTracking.StateProofNextRound = basics.Round((numOfIntervals - 1) * intervalForTest)
block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
// the tracker should have 3 entries
// - voters to confirm the numOfIntervals - 1 th interval
// - voters to confirm the numOfIntervals th interval
// - voters to confirm the numOfIntervals + 1 th interval
- a.Equal(uint64(3), uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, 3)
a.Equal(basics.Round((numOfIntervals-2)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
block = randomBlock(basics.Round(i))
@@ -98,9 +136,9 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
stateTracking.StateProofNextRound = basics.Round(numOfIntervals * intervalForTest)
block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
- a.Equal(uint64(2), uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, 2)
a.Equal(basics.Round((numOfIntervals-1)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
}
@@ -112,7 +150,7 @@ func TestLimitVoterTracker(t *testing.T) {
recoveryIntervalForTests := config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals
lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -132,6 +170,9 @@ func TestLimitVoterTracker(t *testing.T) {
defer au.close()
defer ao.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
i := uint64(1)
// since the first state proof is expected to happen on stateproofInterval*2 we would start give-up on state proofs
@@ -141,33 +182,33 @@ func TestLimitVoterTracker(t *testing.T) {
for ; i < intervalForTest*(recoveryIntervalForTests+2); i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
// the votersForRoundCache should contain recoveryIntervalForTests+2 elements:
// recoveryIntervalForTests - since this is the recovery interval
// + 1 - since votersForRoundCache would contain the votersForRound for the next state proof to come
// + 1 - in order to confirm recoveryIntervalForTests number of state proofs we need recoveryIntervalForTests + 1 headers (for the commitment)
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
for ; i < intervalForTest*(recoveryIntervalForTests+3)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*2-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
for ; i < intervalForTest*(recoveryIntervalForTests+4)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// if the last round of the intervalForTest has not been added to the ledger the votersTracker would
@@ -175,17 +216,17 @@ func TestLimitVoterTracker(t *testing.T) {
for ; i < intervalForTest*(recoveryIntervalForTests+5); i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+3, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+3)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
for ; i < intervalForTest*(recoveryIntervalForTests+5)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*4-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
}
@@ -196,7 +237,7 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) {
intervalForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -216,11 +257,14 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) {
defer au.close()
defer ao.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
i := uint64(1)
for ; i < (intervalForTest)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
top, err := ao.voters.getVoters(basics.Round(intervalForTest - lookbackForTest))
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index b19d379a3..3ec8cd45c 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -20,7 +20,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -63,6 +62,10 @@ type Client struct {
consensus config.ConsensusProtocols
algodVersionAffinity algodclient.APIVersion
kmdVersionAffinity kmdclient.APIVersion
+
+ suggestedParamsCache v1.TransactionParams
+ suggestedParamsExpire time.Time
+ suggestedParamsMaxAge time.Duration
}
// ClientConfig is data to configure a Client
@@ -514,7 +517,7 @@ func (c *Client) signAndBroadcastTransactionWithWallet(walletHandle, pw []byte,
// M | M | error
//
func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64) (first, last, latest uint64, err error) {
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return 0, 0, 0, err
}
@@ -577,7 +580,7 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by
}
// Get current round, protocol, genesis ID
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -921,6 +924,23 @@ func (c *Client) SuggestedParams() (params v1.TransactionParams, err error) {
return
}
+// SetSuggestedParamsCacheAge sets the maximum age of the cached SuggestedParams() result used internally by many libgoal Client functions.
+func (c *Client) SetSuggestedParamsCacheAge(maxAge time.Duration) {
+ c.suggestedParamsMaxAge = maxAge
+}
+
+func (c *Client) cachedSuggestedParams() (params v1.TransactionParams, err error) {
+ if c.suggestedParamsMaxAge == 0 || time.Now().After(c.suggestedParamsExpire) {
+ params, err = c.SuggestedParams()
+ if err == nil && c.suggestedParamsMaxAge != 0 {
+ c.suggestedParamsCache = params
+ c.suggestedParamsExpire = time.Now().Add(c.suggestedParamsMaxAge)
+ }
+ return
+ }
+ return c.suggestedParamsCache, nil
+}
+
// GetPendingTransactions gets a snapshot of current pending transactions on the node.
// If maxTxns = 0, fetches as many transactions as possible.
func (c *Client) GetPendingTransactions(maxTxns uint64) (resp v1.PendingTransactions, err error) {
@@ -968,7 +988,7 @@ func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID s
// AddParticipationKey takes a participation key file and sends it to the node.
// The key will be loaded into the system when the function returns successfully.
func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
- data, err := ioutil.ReadFile(keyfile)
+ data, err := os.ReadFile(keyfile)
if err != nil {
return
}
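
A short usage sketch for the new cache (client construction elided; SetSuggestedParamsCacheAge and ComputeValidityRounds come from this diff, the loop is illustrative):

    c.SetSuggestedParamsCacheAge(5 * time.Second)
    // repeated calls within the next five seconds reuse one SuggestedParams()
    // response instead of hitting algod every time
    for i := 0; i < 100; i++ {
        first, last, _, err := c.ComputeValidityRounds(0, 0, 1000)
        if err != nil {
            return err
        }
        _, _ = first, last
    }

By default suggestedParamsMaxAge is zero, so cachedSuggestedParams falls through to a live SuggestedParams() call and existing callers see no behavior change.
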
diff --git a/libgoal/lockedFile.go b/libgoal/lockedFile.go
index 3c827d870..26b235d24 100644
--- a/libgoal/lockedFile.go
+++ b/libgoal/lockedFile.go
@@ -18,7 +18,7 @@ package libgoal
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
)
@@ -72,7 +72,7 @@ func (f *lockedFile) read() (bytes []byte, err error) {
}
}()
- bytes, err = ioutil.ReadAll(fd)
+ bytes, err = io.ReadAll(fd)
return
}
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index c28fd0216..fb788f024 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -253,7 +253,7 @@ func generateRegistrationTransaction(part generated.ParticipationKey, fee basics
func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee, txnFirstValid, txnLastValid uint64, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) {
// Get current round, protocol, genesis ID
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -293,7 +293,7 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, f
}
// Get current round, protocol, genesis ID
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -350,7 +350,7 @@ func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid,
return transactions.Transaction{}, err
}
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -405,7 +405,7 @@ func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid
return transactions.Transaction{}, err
}
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -460,7 +460,7 @@ func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fe
return transactions.Transaction{}, err
}
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -637,7 +637,7 @@ func (c *Client) MakeUnsignedAssetCreateTx(total uint64, defaultFrozen bool, man
}
// Get consensus params so we can get max field lengths
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
diff --git a/logging/cyclicWriter_test.go b/logging/cyclicWriter_test.go
index d1eaa43b0..5719be930 100644
--- a/logging/cyclicWriter_test.go
+++ b/logging/cyclicWriter_test.go
@@ -17,7 +17,6 @@
package logging
import (
- "io/ioutil"
"os"
"testing"
@@ -49,12 +48,12 @@ func TestCyclicWrite(t *testing.T) {
require.NoError(t, err)
require.Equal(t, len(secondWrite), n)
- liveData, err := ioutil.ReadFile(liveFileName)
+ liveData, err := os.ReadFile(liveFileName)
require.NoError(t, err)
require.Len(t, liveData, len(secondWrite))
require.Equal(t, byte('B'), liveData[0])
- oldData, err := ioutil.ReadFile(archiveFileName)
+ oldData, err := os.ReadFile(archiveFileName)
require.NoError(t, err)
require.Len(t, oldData, space)
for i := 0; i < space; i++ {
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index 81d228324..15a046164 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -29,6 +29,12 @@ type Event string
// StartupEvent event
const StartupEvent Event = "Startup"
+// NameValue defines a named value, for use in an array reported to telemetry.
+type NameValue struct {
+ Name string
+ Value interface{}
+}
+
// StartupEventDetails contains details for the StartupEvent
type StartupEventDetails struct {
Version string
@@ -36,6 +42,7 @@ type StartupEventDetails struct {
Branch string
Channel string
InstanceHash string
+ Overrides []NameValue
}
// HeartbeatEvent is sent periodically to indicate node is running
@@ -84,6 +91,7 @@ type BlockAcceptedEventDetails struct {
Hash string
Round uint64
ValidatedAt time.Duration
+ ReceivedAt time.Duration
PreValidated bool
PropBufLen uint64
VoteBufLen uint64
@@ -292,6 +300,8 @@ type PeerConnectionDetails struct {
Endpoint string `json:",omitempty"`
// MessageDelay is the average relative message delay. Not used for incoming connections.
MessageDelay int64 `json:",omitempty"`
+ // DuplicateFilterCount is the number of times this peer has sent us a filter message for a hash it had already sent before.
+ DuplicateFilterCount int64
}
// CatchpointGenerationEvent event
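A sketch of how the new Overrides field might be populated when reporting non-default settings at startup; every field value below is illustrative rather than taken from a real node:

    details := telemetryspec.StartupEventDetails{
        Version:      "3.10.0",
        Branch:       "rel/stable",
        Channel:      "stable",
        InstanceHash: "example-hash",
        // Each overridden local-config setting becomes one NameValue entry.
        Overrides: []telemetryspec.NameValue{
            {Name: "BaseLoggerDebugLevel", Value: 5},
        },
    }
    _ = details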
diff --git a/netdeploy/network.go b/netdeploy/network.go
index f54eef4cc..78a665b78 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -19,7 +19,6 @@ package netdeploy
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -233,7 +232,7 @@ func saveNetworkCfg(cfg NetworkCfg, configFile string) error {
func (n *Network) scanForNodes() error {
// Enumerate direct sub-directories of our root and look for valid node data directories (where genesis.json exists)
- entries, err := ioutil.ReadDir(n.rootDir)
+ entries, err := os.ReadDir(n.rootDir)
if err != nil {
return err
}
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index 293fbaa86..545617928 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -20,7 +20,7 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"math/big"
"os"
"path/filepath"
@@ -107,8 +107,8 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
return
}
- var files []os.FileInfo
- files, err = ioutil.ReadDir(targetFolder)
+ var files []fs.DirEntry
+ files, err = os.ReadDir(targetFolder)
if err != nil {
return
}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index 3382edee0..23828d469 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -19,7 +19,7 @@ package remote
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io/fs"
"math/rand"
"os"
"path/filepath"
@@ -128,7 +128,7 @@ func InitDeployedNetworkConfig(file string, buildConfig BuildConfig) (cfg Deploy
}
func loadAndProcessConfig(file string, buildConfig BuildConfig) (expanded string, err error) {
- raw, err := ioutil.ReadFile(file)
+ raw, err := os.ReadFile(file)
if err != nil {
return
}
@@ -287,7 +287,7 @@ func validateFilename(filename string) (err error) {
if strings.Index(filename, "*") >= 0 {
return ErrDeployedNetworkNameCantIncludeWildcard
}
- file, err := ioutil.TempFile("", filename)
+ file, err := os.CreateTemp("", filename)
if err == nil {
file.Close()
os.Remove(file.Name())
@@ -831,8 +831,8 @@ func (cfg DeployedNetwork) createHostFolders(targetFolder string, genesisFolder
}
func (cfg DeployedNetwork) copyWalletsToNodes(genesisFolder string, walletNameToDataMap map[string]walletTargetData) (err error) {
- var files []os.FileInfo
- files, err = ioutil.ReadDir(genesisFolder)
+ var files []fs.DirEntry
+ files, err = os.ReadDir(genesisFolder)
if err != nil {
return
}
diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go
index a59b15c3a..113687545 100644
--- a/netdeploy/remote/nodecfg/nodeDir.go
+++ b/netdeploy/remote/nodecfg/nodeDir.go
@@ -19,7 +19,6 @@ package nodecfg
import (
"encoding/json"
"fmt"
- "io/ioutil"
"net/url"
"os"
"path/filepath"
@@ -174,9 +173,11 @@ func (nd *nodeDir) configureAPIToken(token string) (err error) {
return
}
fmt.Fprintf(os.Stdout, " - Assigning APIToken: %s\n", token)
- ioutil.WriteFile(filepath.Join(nd.dataDir, tokens.AlgodTokenFilename), []byte(token), 0600)
- err = nd.saveConfig()
- return
+ err = os.WriteFile(filepath.Join(nd.dataDir, tokens.AlgodTokenFilename), []byte(token), 0600)
+ if err != nil {
+ return err
+ }
+ return nd.saveConfig()
}
func (nd *nodeDir) configureTelemetry(enable bool) (err error) {
diff --git a/network/limitlistener/rejectingLimitListener_test.go b/network/limitlistener/rejectingLimitListener_test.go
index 7f286e13d..a3b955fc5 100644
--- a/network/limitlistener/rejectingLimitListener_test.go
+++ b/network/limitlistener/rejectingLimitListener_test.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"testing"
@@ -57,7 +56,7 @@ func TestRejectingLimitListenerBasic(t *testing.T) {
return
}
- io.Copy(ioutil.Discard, r.Body)
+ io.Copy(io.Discard, r.Body)
r.Body.Close()
queryCh <- nil
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index a7c874cda..dff621fea 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -22,7 +22,7 @@ import (
"encoding/base64"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"net/textproto"
@@ -201,7 +201,7 @@ type GossipNode interface {
// this node to send corresponding MsgOfInterest notifications to any
// newly connecting peers. This should be called before the network
// is started.
- RegisterMessageInterest(protocol.Tag) error
+ RegisterMessageInterest(protocol.Tag)
// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID.
SubstituteGenesisID(rawURL string) string
@@ -1749,9 +1749,10 @@ func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
var connectionDetails telemetryspec.PeersConnectionDetails
for _, peer := range peers {
connDetail := telemetryspec.PeerConnectionDetails{
- ConnectionDuration: uint(now.Sub(peer.createTime).Seconds()),
- TelemetryGUID: peer.TelemetryGUID,
- InstanceName: peer.InstanceName,
+ ConnectionDuration: uint(now.Sub(peer.createTime).Seconds()),
+ TelemetryGUID: peer.TelemetryGUID,
+ InstanceName: peer.InstanceName,
+ DuplicateFilterCount: peer.duplicateFilterCount,
}
if peer.outgoing {
connDetail.Address = justHost(peer.conn.RemoteAddr().String())
@@ -2031,7 +2032,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
if err == websocket.ErrBadHandshake {
// reading here from ioutil is safe only because it came from DialContext above, which already finished reading all the data from the network
// and placed it all in an ioutil.NopCloser reader.
- bodyBytes, _ := ioutil.ReadAll(response.Body)
+ bodyBytes, _ := io.ReadAll(response.Body)
errString := string(bodyBytes)
if len(errString) > 128 {
errString = errString[:128]
@@ -2308,7 +2309,7 @@ func SetUserAgentHeader(header http.Header) {
// this node to send corresponding MsgOfInterest notifications to any
// newly connecting peers. This should be called before the network
// is started.
-func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) error {
+func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) {
wn.messagesOfInterestMu.Lock()
defer wn.messagesOfInterestMu.Unlock()
@@ -2321,11 +2322,10 @@ func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) error {
wn.messagesOfInterest[t] = true
wn.updateMessagesOfInterestEnc()
- return nil
}
// DeregisterMessageInterest will tell peers to no longer send us traffic with a protocol Tag
-func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) error {
+func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) {
wn.messagesOfInterestMu.Lock()
defer wn.messagesOfInterestMu.Unlock()
@@ -2338,7 +2338,6 @@ func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) error {
delete(wn.messagesOfInterest, t)
wn.updateMessagesOfInterestEnc()
- return nil
}
func (wn *WebsocketNetwork) updateMessagesOfInterestEnc() {
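With the error return gone, registration call sites shrink to bare statements. A minimal sketch, assuming an already-configured *WebsocketNetwork wn, and honoring the requirement above that registration happens before the network is started:

    // Ask newly connecting peers to send only the traffic we care about.
    wn.RegisterMessageInterest(protocol.AgreementVoteTag)
    wn.RegisterMessageInterest(protocol.ProposalPayloadTag)
    wn.Start()

    // Later, tell peers to stop sending proposal payloads.
    wn.DeregisterMessageInterest(protocol.ProposalPayloadTag)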
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 6f14e669f..dc8651125 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -1467,7 +1467,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
now := time.Now()
expire := now.Add(5 * time.Second)
for {
- time.Sleep(time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
if len(peer.sendBufferHighPrio)+len(peer.sendBufferBulk) == 0 {
break
}
@@ -1838,7 +1838,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
// have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag
- require.NoError(t, netB.RegisterMessageInterest(ft2))
+ netB.RegisterMessageInterest(ft2)
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
diff --git a/network/wsPeer.go b/network/wsPeer.go
index 313df8ad5..870eefddb 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -75,6 +75,7 @@ var networkMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name
var duplicateNetworkMessageReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkMessageReceivedTotal)
var duplicateNetworkMessageReceivedBytesTotal = metrics.MakeCounter(metrics.DuplicateNetworkMessageReceivedBytesTotal)
+var duplicateNetworkFilterReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkFilterReceivedTotal)
var outgoingNetworkMessageFilteredOutTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutTotal)
var outgoingNetworkMessageFilteredOutBytesTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutBytesTotal)
@@ -184,6 +185,9 @@ type wsPeer struct {
incomingMsgFilter *messageFilter
outgoingMsgFilter *messageFilter
+ // duplicateFilterCount counts how many times the remote peer has sent us a filter message
+ // for a hash that it had already sent before.
+ duplicateFilterCount int64
processed chan struct{}
@@ -576,7 +580,14 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) {
var digest crypto.Digest
copy(digest[:], msg.Data)
//wp.net.log.Debugf("add filter %v", digest)
- wp.outgoingMsgFilter.CheckDigest(digest, true, true)
+ has := wp.outgoingMsgFilter.CheckDigest(digest, true, true)
+ if has {
+ // Count that this peer has sent us duplicate filter messages: this means it received the same
+ // large message concurrently from several peers, and then sent the filter message to us after
+ // each large message finished transferring.
+ duplicateNetworkFilterReceivedTotal.Inc(nil)
+ atomic.AddInt64(&wp.duplicateFilterCount, 1)
+ }
}
func (wp *wsPeer) writeLoopSend(msgs sendMessages) disconnectReason {
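handleFilterMessage writes the new counter with atomic.AddInt64, so a reader on another goroutine should pair it with an atomic load; the telemetry loop above copies the field into PeerConnectionDetails.DuplicateFilterCount. A minimal sketch, assuming access to a *wsPeer value wp:

    // Snapshot how many duplicate filter messages this peer has sent so far.
    dupes := atomic.LoadInt64(&wp.duplicateFilterCount)
    _ = dupes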
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index 550eb5fbd..800ab5b14 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -18,12 +18,14 @@ package network
import (
"encoding/binary"
+ "strings"
"testing"
"time"
"unsafe"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/metrics"
"github.com/stretchr/testify/require"
)
@@ -90,10 +92,10 @@ func TestDefaultMessageTagsLength(t *testing.T) {
}
}
-// TestAtomicVariablesAligment ensures that the 64-bit atomic variables
+// TestAtomicVariablesAlignment ensures that the 64-bit atomic variables
// offsets are 64-bit aligned. This is required due to go atomic library
// limitation.
-func TestAtomicVariablesAligment(t *testing.T) {
+func TestAtomicVariablesAlignment(t *testing.T) {
partitiontest.PartitionTest(t)
p := wsPeer{}
@@ -101,3 +103,25 @@ func TestAtomicVariablesAligment(t *testing.T) {
require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0)
require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0)
}
+
+func TestTagCounterFiltering(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ tagCounterTags := map[string]*metrics.TagCounter{
+ "networkSentBytesByTag": networkSentBytesByTag,
+ "networkReceivedBytesByTag": networkReceivedBytesByTag,
+ "networkMessageReceivedByTag": networkMessageReceivedByTag,
+ "networkMessageSentByTag": networkMessageSentByTag,
+ }
+ for name, tag := range tagCounterTags {
+ t.Run(name, func(t *testing.T) {
+ require.NotZero(t, len(tag.AllowedTags))
+ tag.Add("TEST_TAG", 1)
+ b := strings.Builder{}
+ tag.WriteMetric(&b, "")
+ result := b.String()
+ require.Contains(t, result, "_UNK")
+ require.NotContains(t, result, "TEST_TAG")
+ })
+ }
+}
diff --git a/node/node.go b/node/node.go
index 7f0df8140..5e0ec29fb 100644
--- a/node/node.go
+++ b/node/node.go
@@ -21,7 +21,6 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -298,11 +297,13 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
return nil, err
}
if catchpointCatchupState != ledger.CatchpointCatchupStateInactive {
- node.catchpointCatchupService, err = catchup.MakeResumedCatchpointCatchupService(context.Background(), node, node.log, node.net, node.ledger.Ledger, node.config)
+ accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log)
+ node.catchpointCatchupService, err = catchup.MakeResumedCatchpointCatchupService(context.Background(), node, node.log, node.net, accessor, node.config)
if err != nil {
log.Errorf("unable to create catchpoint catchup service: %v", err)
return nil, err
}
+ node.log.Infof("resuming catchpoint catchup from state %d", catchpointCatchupState)
}
node.tracer = messagetracer.NewTracer(log).Init(cfg)
@@ -900,7 +901,7 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
func (node *AlgorandFullNode) loadParticipationKeys() error {
// Generate a list of all potential participation key files
genesisDir := filepath.Join(node.rootDir, node.genesisID)
- files, err := ioutil.ReadDir(genesisDir)
+ files, err := os.ReadDir(genesisDir)
if err != nil {
return fmt.Errorf("AlgorandFullNode.loadPartitipationKeys: could not read directory %v: %v", genesisDir, err)
}
@@ -1118,7 +1119,8 @@ func (node *AlgorandFullNode) StartCatchup(catchpoint string) error {
return MakeCatchpointUnableToStartError(stats.CatchpointLabel, catchpoint)
}
var err error
- node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, node.ledger.Ledger, node.config)
+ accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log)
+ node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, accessor, node.config)
if err != nil {
node.log.Warnf("unable to create catchpoint catchup service : %v", err)
return err
@@ -1145,12 +1147,12 @@ func (node *AlgorandFullNode) AbortCatchup(catchpoint string) error {
}
// SetCatchpointCatchupMode changes the node's operational mode to or from catchpoint catchup mode; it returns a
-// channel which contains the updated node context. This function need to work asyncronisly so that the caller could
-// detect and handle the usecase where the node is being shut down while we're switching to/from catchup mode without
+// channel which contains the updated node context. This function needs to work asynchronously so that the caller can
+// detect and handle the use case where the node is being shut down while we're switching to/from catchup mode without
// deadlocking on the shared node mutex.
func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode bool) (outCtxCh <-chan context.Context) {
// create a non-buffered channel to return the newly created context. The fact that it's non-buffered here
- // is imporant, as it allows us to syncronize the "receiving" of the new context before canceling of the previous
+ // is important, as it allows us to synchronize the "receiving" of the new context before canceling of the previous
// one.
ctxCh := make(chan context.Context)
outCtxCh = ctxCh
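A sketch of the asynchronous hand-off the comment describes, assuming a caller holding the node, and assuming the channel is closed, rather than written to, when the node shuts down mid-switch:

    ctxCh := node.SetCatchpointCatchupMode(true)
    newCtx, ok := <-ctxCh
    if !ok {
        // the node is shutting down; the mode switch never completed
        return
    }
    // operate under newCtx until catchup mode is exited
    _ = newCtx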
diff --git a/node/node_test.go b/node/node_test.go
index f440810f6..dcf2bb6a2 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -502,51 +502,6 @@ func TestMismatchingGenesisDirectoryPermissions(t *testing.T) {
require.NoError(t, os.RemoveAll(testDirectroy))
}
-func TestAsyncRecord(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testDirectroy := t.TempDir()
-
- genesis := bookkeeping.Genesis{
- SchemaID: "go-test-node-record-async",
- Proto: protocol.ConsensusCurrentVersion,
- Network: config.Devtestnet,
- FeeSink: sinkAddr.String(),
- RewardsPool: poolAddr.String(),
- }
-
- cfg := config.GetDefaultLocal()
- cfg.DisableNetworking = true
- node, err := MakeFull(logging.TestingLog(t), testDirectroy, config.GetDefaultLocal(), []string{}, genesis)
- require.NoError(t, err)
- node.Start()
- defer node.Stop()
-
- var addr basics.Address
- addr[0] = 1
-
- p := account.Participation{
- Parent: addr,
- FirstValid: 0,
- LastValid: 1000000,
- Voting: &crypto.OneTimeSignatureSecrets{},
- VRF: &crypto.VRFSecrets{},
- }
- id, err := node.accountManager.Registry().Insert(p)
- require.NoError(t, err)
- err = node.accountManager.Registry().Register(id, 0)
- require.NoError(t, err)
-
- node.Record(addr, 10000, account.Vote)
- node.Record(addr, 20000, account.BlockProposal)
-
- time.Sleep(5000 * time.Millisecond)
- records := node.accountManager.Registry().GetAll()
- require.Len(t, records, 1)
- require.Equal(t, 10000, int(records[0].LastVote))
- require.Equal(t, 20000, int(records[0].LastBlockProposal))
-}
-
// TestOfflineOnlineClosedBitStatus a test that validates that the correct bits are being set
func TestOfflineOnlineClosedBitStatus(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go
index 872fa05a4..e614cf63f 100644
--- a/nodecontrol/algodControl.go
+++ b/nodecontrol/algodControl.go
@@ -18,7 +18,6 @@ package nodecontrol
import (
"fmt"
- "io/ioutil"
"net/url"
"os"
"os/exec"
@@ -368,7 +367,7 @@ func (nc NodeController) GetGenesis() (bookkeeping.Genesis, error) {
var genesis bookkeeping.Genesis
genesisFile := filepath.Join(nc.GetDataDir(), config.GenesisJSONFile)
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
return genesis, err
}
@@ -417,7 +416,7 @@ func (nc NodeController) setAlgodCmdLogFiles(cmd *exec.Cmd) (files []*os.File) {
func (nc NodeController) readGenesisJSON(genesisFile string) (genesisLedger bookkeeping.Genesis, err error) {
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
return
}
diff --git a/protocol/codec.go b/protocol/codec.go
index 1153b7c61..e74b4b3e8 100644
--- a/protocol/codec.go
+++ b/protocol/codec.go
@@ -246,6 +246,35 @@ func NewDecoderBytes(b []byte) Decoder {
return codec.NewDecoderBytes(b, CodecHandle)
}
+// NewMsgpDecoderBytes returns a decoder that reads msgp-serialized objects from [b].
+func NewMsgpDecoderBytes(b []byte) *MsgpDecoderBytes {
+ return &MsgpDecoderBytes{b: b, pos: 0}
+}
+
+// MsgpDecoderBytes decodes a stream of msgp-encoded objects from a []byte.
+type MsgpDecoderBytes struct {
+ b []byte
+ pos int
+}
+
+// Decode unmarshals the next object from the byte stream into objptr.
+func (d *MsgpDecoderBytes) Decode(objptr msgp.Unmarshaler) error {
+ if !objptr.CanUnmarshalMsg(objptr) {
+ return fmt.Errorf("object %T cannot be msgp-unmashalled", objptr)
+ }
+ if d.pos >= len(d.b) {
+ return io.EOF
+ }
+
+ rem, err := objptr.UnmarshalMsg(d.b[d.pos:])
+ if err != nil {
+ return err
+ }
+ d.pos = (len(d.b) - len(rem))
+ return nil
+}
+
// encodingPool holds temporary byte slice buffers used for encoding messages.
var encodingPool = sync.Pool{
New: func() interface{} {
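A usage sketch for the new decoder from outside the protocol package, assuming buf holds zero or more back-to-back msgp encodings of protocol.Tag values (the TestMsgpDecode case added below drives the same loop):

    dec := protocol.NewMsgpDecoderBytes(buf)
    for {
        var tag protocol.Tag
        if err := dec.Decode(&tag); err == io.EOF {
            break // every object in buf has been consumed
        } else if err != nil {
            return err // malformed element or non-unmarshalable target
        }
        // use tag ...
    }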
diff --git a/protocol/codec_test.go b/protocol/codec_test.go
index 79814dadc..e623f9024 100644
--- a/protocol/codec_test.go
+++ b/protocol/codec_test.go
@@ -17,6 +17,9 @@
package protocol
import (
+ "fmt"
+ "io"
+ "math/rand"
"reflect"
"testing"
@@ -199,3 +202,68 @@ func TestEncodeJSON(t *testing.T) {
require.True(t, reflect.DeepEqual(v, nsv))
require.True(t, reflect.DeepEqual(v, sv))
}
+
+func TestMsgpDecode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var tag Tag = "test"
+ dec := NewMsgpDecoderBytes([]byte{1, 2, 3})
+ err := dec.Decode(&tag)
+ require.Error(t, err)
+
+ data := EncodeMsgp(tag)
+ dec = NewMsgpDecoderBytes(data)
+ var tag2 Tag
+ err = dec.Decode(&tag2)
+ require.Equal(t, tag, tag2)
+ require.NoError(t, err)
+
+ limit := rand.Intn(30)
+ tags := make([]Tag, limit)
+ buf := make([]byte, 0, limit*10)
+ for i := 0; i < limit; i++ {
+ tags[i] = Tag(fmt.Sprintf("tag_%d", i))
+ buf = append(buf, EncodeMsgp(tags[i])...)
+ }
+
+ dec = NewMsgpDecoderBytes(buf)
+ for i := 0; i < limit; i++ {
+ err = dec.Decode(&tag2)
+ require.NoError(t, err)
+ require.Equal(t, tags[i], tag2)
+ }
+ err = dec.Decode(&tag2)
+ require.Error(t, err)
+ require.ErrorIs(t, err, io.EOF)
+}
+
+func TestRandomizeObjectWithPtrField(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type testObjA struct {
+ U64 uint64
+ }
+ type testObjB struct {
+ U16 uint16
+ ObjA *testObjA
+ }
+
+ // run a few and fail if all ints are zero
+ sawNonZeroU16 := false
+ sawNonZeroU64 := false
+ for i := 0; i < 10; i++ {
+ obj, err := RandomizeObject(&testObjB{})
+ require.NoError(t, err)
+ objB, ok := obj.(*testObjB)
+ require.True(t, ok)
+ require.NotNil(t, objB.ObjA)
+ if objB.U16 != 0 {
+ sawNonZeroU16 = true
+ }
+ if objB.ObjA.U64 != 0 {
+ sawNonZeroU64 = true
+ }
+ }
+ require.True(t, sawNonZeroU16, "RandomizeObject made all zeroes for testObjB.U16")
+ require.True(t, sawNonZeroU64, "RandomizeObject made all zeroes for testObjA.U64")
+}
diff --git a/protocol/codec_tester.go b/protocol/codec_tester.go
index 694c2c492..f40270039 100644
--- a/protocol/codec_tester.go
+++ b/protocol/codec_tester.go
@@ -17,9 +17,7 @@
package protocol
import (
- "errors"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path"
@@ -147,11 +145,11 @@ func checkMsgpAllocBoundDirective(dataType reflect.Type) bool {
return nil
})
for _, packageFile := range packageFiles {
- fileBytes, err := ioutil.ReadFile(packageFile)
+ fileBytes, err := os.ReadFile(packageFile)
if err != nil {
continue
}
- if strings.Index(string(fileBytes), fmt.Sprintf("msgp:allocbound %s", dataType.Name())) != -1 {
+ if strings.Contains(string(fileBytes), fmt.Sprintf("msgp:allocbound %s", dataType.Name())) {
// message pack alloc bound definition was found.
return true
}
@@ -230,7 +228,13 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
switch v.Kind() {
case reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- v.SetUint(rand.Uint64())
+ if strings.HasSuffix(datapath, "/HashType") &&
+ strings.HasSuffix(v.Type().PkgPath(), "go-algorand/crypto") && v.Type().Name() == "HashType" {
+ // generate value that will avoid protocol.ErrInvalidObject from HashType.Validate()
+ v.SetUint(rand.Uint64() % 3) // 3 is crypto.MaxHashType
+ } else {
+ v.SetUint(rand.Uint64())
+ }
*remainingChanges--
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
v.SetInt(int64(rand.Uint64()))
@@ -243,6 +247,12 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
}
v.SetString(string(buf))
*remainingChanges--
+ case reflect.Ptr:
+ v.Set(reflect.New(v.Type().Elem()))
+ err := randomizeValue(reflect.Indirect(v), datapath, tag, remainingChanges, seenTypes)
+ if err != nil {
+ return err
+ }
case reflect.Struct:
st := v.Type()
if !seenTypes[st] {
@@ -352,7 +362,11 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
}
if debugCodecTester {
- ioutil.WriteFile("/tmp/v0", []byte(fmt.Sprintf("%#v", v0)), 0666)
+ err = os.WriteFile("/tmp/v0", []byte(fmt.Sprintf("%#v", v0)), 0666)
+ if err != nil {
+ return err
+ }
+
}
e1 := EncodeMsgp(v0.(msgp.Marshaler))
@@ -360,8 +374,14 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
// for debug, write out the encodings to a file
if debugCodecTester {
- ioutil.WriteFile("/tmp/e1", e1, 0666)
- ioutil.WriteFile("/tmp/e2", e2, 0666)
+ err = os.WriteFile("/tmp/e1", e1, 0666)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile("/tmp/e2", e2, 0666)
+ if err != nil {
+ return err
+ }
}
if !reflect.DeepEqual(e1, e2) {
@@ -382,8 +402,14 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
}
if debugCodecTester {
- ioutil.WriteFile("/tmp/v1", []byte(fmt.Sprintf("%#v", v1)), 0666)
- ioutil.WriteFile("/tmp/v2", []byte(fmt.Sprintf("%#v", v2)), 0666)
+ err = os.WriteFile("/tmp/v1", []byte(fmt.Sprintf("%#v", v1)), 0666)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile("/tmp/v2", []byte(fmt.Sprintf("%#v", v2)), 0666)
+ if err != nil {
+ return err
+ }
}
// At this point, it might be that v differs from v1 and v2,
@@ -402,8 +428,14 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
ee2 := EncodeReflect(v1)
if debugCodecTester {
- ioutil.WriteFile("/tmp/ee1", ee1, 0666)
- ioutil.WriteFile("/tmp/ee2", ee2, 0666)
+ err = os.WriteFile("/tmp/ee1", ee1, 0666)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile("/tmp/ee2", ee2, 0666)
+ if err != nil {
+ return err
+ }
}
if !reflect.DeepEqual(e1, ee1) {
@@ -427,15 +459,7 @@ func RunEncodingTest(t *testing.T, template msgpMarshalUnmarshal) {
t.Skip()
return
}
- if err == nil {
- continue
- }
- // some objects might appen to the original error additional info.
- // we ensure that invalidObject error is not failing the test.
- if errors.As(err, &ErrInvalidObject) {
- continue
- }
require.NoError(t, err)
}
}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index a996525ad..cd03519fb 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -198,6 +198,20 @@ const ConsensusFuture = ConsensusVersion(
"future",
)
+// ConsensusVAlpha1 is the first consensus protocol for AlphaNet, which is the same as
+// v32, but with a 2-second filter timeout and 5M block size.
+const ConsensusVAlpha1 = ConsensusVersion("alpha1")
+
+// ConsensusVAlpha2 is the second consensus protocol for AlphaNet, which increases the
+// filter timeout to 3.5 seconds and uses 5MiB blocks.
+const ConsensusVAlpha2 = ConsensusVersion("alpha2")
+
+// ConsensusVAlpha3 uses the same parameters as ConsensusV33.
+const ConsensusVAlpha3 = ConsensusVersion("alpha3")
+
+// ConsensusVAlpha4 uses the same parameters as ConsensusV34.
+const ConsensusVAlpha4 = ConsensusVersion("alpha4")
+
// !!! ********************* !!!
// !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!!
// !!! ********************* !!!
diff --git a/protocol/transcode/core_test.go b/protocol/transcode/core_test.go
index 132c4270a..e9cfc42f8 100644
--- a/protocol/transcode/core_test.go
+++ b/protocol/transcode/core_test.go
@@ -20,7 +20,6 @@ import (
"encoding/base32"
"fmt"
"io"
- "io/ioutil"
"testing"
"github.com/stretchr/testify/require"
@@ -56,7 +55,7 @@ func testIdempotentRoundtrip(t *testing.T, mpdata []byte) {
}
p1in.Close()
}()
- res, err := ioutil.ReadAll(p3out)
+ res, err := io.ReadAll(p3out)
require.NoError(t, err)
require.Equal(t, mpdata, res)
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index f05c0f6ef..2adb2f1b2 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -19,7 +19,7 @@ package rpcs
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"strings"
"testing"
@@ -173,7 +173,7 @@ func TestRedirectFallbackArchiver(t *testing.T) {
require.NoError(t, err)
require.Equal(t, http.StatusOK, response.StatusCode)
- bodyData, err := ioutil.ReadAll(response.Body)
+ bodyData, err := io.ReadAll(response.Body)
require.NoError(t, err)
require.NotEqual(t, 0, len(bodyData))
}
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index d4b0d2fbc..0d09bb087 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -19,6 +19,7 @@ package rpcs
import (
"context"
"errors"
+ "math/rand"
"net/http"
"net/rpc"
"strings"
@@ -44,9 +45,31 @@ type mockPendingTxAggregate struct {
txns []transactions.SignedTxn
}
+var testSource rand.Source
+var testRand *rand.Rand
+
+func init() {
+ testSource = rand.NewSource(12345678)
+ testRand = rand.New(testSource)
+}
+
+func testRandBytes(d []byte) {
+ // We don't need cryptographically strong random bytes for a
+ // unit test, but we _do_ need deterministic 'random' bytes so
+ // that _sometimes_ a bloom filter doesn't fail on the data
+ // (e.g. TestSync() below).
+ n, err := testRand.Read(d)
+ if n != len(d) {
+ panic("short rand read")
+ }
+ if err != nil {
+ panic(err)
+ }
+}
+
func makeMockPendingTxAggregate(txCount int) mockPendingTxAggregate {
var secret [32]byte
- crypto.RandBytes(secret[:])
+ testRandBytes(secret[:])
sk := crypto.GenerateSignatureSecrets(crypto.Seed(secret))
mock := mockPendingTxAggregate{
txns: make([]transactions.SignedTxn, txCount),
@@ -54,7 +77,7 @@ func makeMockPendingTxAggregate(txCount int) mockPendingTxAggregate {
for i := 0; i < txCount; i++ {
var note [16]byte
- crypto.RandBytes(note[:])
+ testRandBytes(note[:])
tx := transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
diff --git a/scripts/build_deb.sh b/scripts/build_deb.sh
index 0cd7c154a..3c5b1e219 100755
--- a/scripts/build_deb.sh
+++ b/scripts/build_deb.sh
@@ -74,7 +74,7 @@ for data in "${data_files[@]}"; do
done
if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p "${PKG_ROOT}/var/lib/algorand/genesis/${dir}"
cp "${REPO_DIR}/installer/genesis/${dir}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis/${dir}/genesis.json"
@@ -82,7 +82,7 @@ if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
done
# Copy the appropriate network genesis.json for our default (in root ./genesis folder)
cp "${PKG_ROOT}/var/lib/algorand/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand"
-elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta" ]]; then
+elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta"|| "${CHANNEL}" == "alpha" ]]; then
cp "${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis.json"
#${GOPATH}/bin/buildtools genesis ensure -n ${DEFAULTNETWORK} --source ${REPO_DIR}/gen/${DEFAULTNETWORK}/genesis.json --target ${PKG_ROOT}/var/lib/algorand/genesis.json --releasedir ${REPO_DIR}/installer/genesis
else
diff --git a/scripts/build_package.sh b/scripts/build_package.sh
index ccbf0d27e..d8f169201 100755
--- a/scripts/build_package.sh
+++ b/scripts/build_package.sh
@@ -88,7 +88,7 @@ done
mkdir ${PKG_ROOT}/genesis
-genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p ${PKG_ROOT}/genesis/${dir}
if [ -f "${REPO_DIR}/gen/${dir}/genesis.json" ]; then
diff --git a/scripts/buildtools/check_tests.py b/scripts/buildtools/check_tests.py
index d34bb7038..8bf8a66d4 100755
--- a/scripts/buildtools/check_tests.py
+++ b/scripts/buildtools/check_tests.py
@@ -6,8 +6,9 @@ import argparse
# Arguments parsing / help menu
parser = argparse.ArgumentParser(description='Check test results for intentionally and unintentionally skipped tests, as well as tests that ran multiple times.')
-parser.add_argument('tests_results_filepath', metavar='RESULTS_FILE',
- help='json format test results file path (e.g. /tmp/results/testresults.json)')
+parser.add_argument('--tests-results-filepath', metavar='RESULTS_FILE',
+ help='json format test results file path (e.g. /tmp/results/testresults.json)', required=True)
+parser.add_argument('--ignored-tests', nargs='*', help='Exact test names to ignore during verification')
args = parser.parse_args()
# Go through the given file one json object at a time, and record into a dict
@@ -17,7 +18,10 @@ with open(args.tests_results_filepath) as f:
testDict = json.loads(jsonObj)
if 'Test' not in testDict:
continue
-
+
+ if args.ignored_tests and testDict['Test'] in args.ignored_tests:
+ continue
+
fullTestName = testDict['Package'] + ' ' + testDict['Test']
if fullTestName not in AllTestResults:
AllTestResults[fullTestName] = {}
diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh
index 2e0db6353..7a004851a 100755
--- a/scripts/buildtools/install_buildtools.sh
+++ b/scripts/buildtools/install_buildtools.sh
@@ -87,9 +87,9 @@ if [[ "${BUILDTOOLS_INSTALL}" != "ALL" ]]; then
exit 0
fi
-install_go_module golang.org/x/lint golang.org/x/lint/golint
install_go_module golang.org/x/tools golang.org/x/tools/cmd/stringer
install_go_module github.com/go-swagger/go-swagger github.com/go-swagger/go-swagger/cmd/swagger
install_go_module github.com/algorand/msgp
install_go_module gotest.tools/gotestsum
install_go_module github.com/algorand/oapi-codegen github.com/algorand/oapi-codegen/cmd/oapi-codegen
+install_go_module github.com/golangci/golangci-lint/cmd/golangci-lint
diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions
index c5a247b03..04960db22 100644
--- a/scripts/buildtools/versions
+++ b/scripts/buildtools/versions
@@ -4,3 +4,4 @@ github.com/algorand/msgp v1.1.52
github.com/algorand/oapi-codegen v1.3.7
github.com/go-swagger/go-swagger v0.25.0
gotest.tools/gotestsum v1.6.4
+github.com/golangci/golangci-lint/cmd/golangci-lint v1.47.3
diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh
index 4752108a2..a42405733 100755
--- a/scripts/check_deps.sh
+++ b/scripts/check_deps.sh
@@ -35,9 +35,9 @@ missing_dep() {
}
GO_DEPS=(
- "$GO_BIN/golint"
"$GO_BIN/stringer"
"$GO_BIN/msgp"
+ "$GO_BIN/golangci-lint"
)
check_deps() {
diff --git a/scripts/compute_branch_channel.sh b/scripts/compute_branch_channel.sh
index 90cde3b55..6e8f77eac 100755
--- a/scripts/compute_branch_channel.sh
+++ b/scripts/compute_branch_channel.sh
@@ -10,6 +10,8 @@ elif [ "$1" = "rel/stable" ]; then
echo "stable"
elif [ "$1" = "rel/beta" ]; then
echo "beta"
+elif [ "$1" = "feature/alphanet" ]; then
+ echo "alpha"
else
echo "dev"
fi
diff --git a/scripts/compute_branch_network.sh b/scripts/compute_branch_network.sh
index 9967463b1..a6a0d8f24 100755
--- a/scripts/compute_branch_network.sh
+++ b/scripts/compute_branch_network.sh
@@ -15,6 +15,9 @@ if [ "${BRANCH}" = "rel/stable" ]; then
elif [ "${BRANCH}" = "rel/beta" ]; then
echo "betanet"
exit 0
+elif [ "${BRANCH}" = "feature/alphanet" ]; then
+ echo "alphanet"
+ exit 0
fi
#get parent of current branch
@@ -26,6 +29,8 @@ if [ "${BRANCHPARENT}" = "rel/stable" ]; then
echo "testnet"
elif [ "${BRANCHPARENT}" = "rel/beta" ]; then
echo "betanet"
+elif [ "${BRANCHPARENT}" = "feature/alphanet" ]; then
+ echo "alphanet"
else
echo "devnet"
fi
diff --git a/scripts/compute_package_name.sh b/scripts/compute_package_name.sh
index 7e53a1351..0a81ffb2f 100755
--- a/scripts/compute_package_name.sh
+++ b/scripts/compute_package_name.sh
@@ -10,14 +10,12 @@
CHANNEL=${1:-stable}
NAME=${2:-algorand}
-if [ ! -z ${PACKAGE_NAME_EXTENSION} ]; then
+if [ -n "${PACKAGE_NAME_EXTENSION}" ]; then
NAME="${NAME}-${PACKAGE_NAME_EXTENSION}"
fi
-if [ "$CHANNEL" = beta ]; then
- echo "$NAME-beta"
-elif [ "$CHANNEL" = nightly ]; then
- echo "$NAME-nightly"
-else
+if [ "$CHANNEL" = stable ]; then
echo "$NAME"
+else
+ echo "$NAME-$CHANNEL"
fi
diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh
index 390847ed4..4e3525a54 100755
--- a/scripts/get_golang_version.sh
+++ b/scripts/get_golang_version.sh
@@ -11,7 +11,7 @@
# Our build task-runner `mule` will refer to this script and will automatically
# build a new image whenever the version number has been changed.
-BUILD=1.17.9
+BUILD=1.17.13
MIN=1.17
GO_MOD_SUPPORT=1.17
diff --git a/scripts/release/build/deb/build_deb.sh b/scripts/release/build/deb/build_deb.sh
index aeaa4eb30..275c37cd2 100755
--- a/scripts/release/build/deb/build_deb.sh
+++ b/scripts/release/build/deb/build_deb.sh
@@ -57,7 +57,7 @@ if [[ ! "$PKG_NAME" =~ devtools ]]; then
cp "./installer/$data" "$PKG_ROOT/var/lib/algorand"
done
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p "$PKG_ROOT/var/lib/algorand/genesis/$dir"
cp "./installer/genesis/$dir/genesis.json" "$PKG_ROOT/var/lib/algorand/genesis/$dir/genesis.json"
diff --git a/scripts/release/mule/common/get_channel.sh b/scripts/release/mule/common/get_channel.sh
index b59937a1a..d5b82d640 100755
--- a/scripts/release/mule/common/get_channel.sh
+++ b/scripts/release/mule/common/get_channel.sh
@@ -2,7 +2,10 @@
NETWORK="$1"
-if [ "$NETWORK" = betanet ]
+if [ "$NETWORK" = alphanet ]
+then
+ echo alpha
+elif [ "$NETWORK" = betanet ]
then
echo beta
elif [ "$NETWORK" = mainnet ] || [ "$NETWORK" = testnet ]
diff --git a/scripts/release/mule/deploy/docker/docker.sh b/scripts/release/mule/deploy/docker/docker.sh
index c4125f041..ee0c55fe0 100755
--- a/scripts/release/mule/deploy/docker/docker.sh
+++ b/scripts/release/mule/deploy/docker/docker.sh
@@ -13,9 +13,9 @@ if [ -z "$NETWORK" ] || [ -z "$VERSION" ]; then
exit 1
fi
-if [[ ! "$NETWORK" =~ ^mainnet$|^testnet$|^betanet$ ]]
+if [[ ! "$NETWORK" =~ ^mainnet$|^testnet$|^betanet$|^alphanet$ ]]
then
- echo "[$0] Network values must be either \`mainnet\`, \`testnet\` or \`betanet\`."
+ echo "[$0] Network values must be either \`mainnet\`, \`testnet\`, \`betanet\`, or \`alphanet\`."
exit 1
fi
@@ -28,9 +28,9 @@ then
# Build and push testnet.
./build_releases.sh --tagname "$VERSION" --network testnet --cached
-elif [ "$NETWORK" = betanet ]
+elif [ "$NETWORK" = betanet ] || [ "$NETWORK" = alphanet ]
then
- ./build_releases.sh --tagname "$VERSION" --network betanet
+ ./build_releases.sh --tagname "$VERSION" --network "$NETWORK"
fi
popd
diff --git a/scripts/release/mule/package/deb/package.sh b/scripts/release/mule/package/deb/package.sh
index 6cd9f653e..8a8612a3a 100755
--- a/scripts/release/mule/package/deb/package.sh
+++ b/scripts/release/mule/package/deb/package.sh
@@ -63,7 +63,7 @@ find tmp/node_pkgs -name "*${CHANNEL}*linux*${VERSION}*.tar.gz" | cut -d '/' -f3
cp "installer/$data" "$PKG_ROOT/var/lib/algorand"
done
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p "$PKG_ROOT/var/lib/algorand/genesis/$dir"
cp "./installer/genesis/$dir/genesis.json" "$PKG_ROOT/var/lib/algorand/genesis/$dir/genesis.json"
diff --git a/scripts/release/mule/test/test.sh b/scripts/release/mule/test/test.sh
index aed2a5110..27efe724a 100755
--- a/scripts/release/mule/test/test.sh
+++ b/scripts/release/mule/test/test.sh
@@ -30,8 +30,8 @@ export OS_TYPE
export SHA
-ALGORAND_PACKAGE_NAME=$([ "$CHANNEL" = beta ] && echo algorand-beta || echo algorand)
-DEVTOOLS_PACKAGE_NAME=$([ "$CHANNEL" = beta ] && echo algorand-devtools-beta || echo algorand-devtools)
+ALGORAND_PACKAGE_NAME=$( ( [ "$CHANNEL" = beta ] && echo algorand-beta ) || ( [ "$CHANNEL" = alpha ] && echo algorand-alpha ) || ( echo algorand ) )
+DEVTOOLS_PACKAGE_NAME=$( ( [ "$CHANNEL" = beta ] && echo algorand-devtools-beta ) || ( [ "$CHANNEL" = alpha ] && echo algorand-devtools-alpha ) || ( echo algorand-devtools ) )
export ALGORAND_PACKAGE_NAME
export DEVTOOLS_PACKAGE_NAME
@@ -61,7 +61,7 @@ then
# so although it appears as though I just lied to you, I did not :)
#
# rpm
- if [ "$CHANNEL" = "beta" ]
+ if [ "$CHANNEL" = "beta" ] || [ "$CHANNEL" = "alpha" ]
then
PACKAGE_NAME_SUFFIX="$CHANNEL-$VERSION-1.$ARCH_BIT"
else
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index ed0f5825c..8ba594d7b 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -27,43 +27,6 @@ eval "$(~/gimme "${GOLANG_VERSION}")"
make gen SHORT_PART_PERIOD=1
-function runGoFmt() {
- unformatted=$(gofmt -l .)
- [ -z "$unformatted" ] && return 0
-
- # Some files are not gofmt'd. Print message and fail.
-
- echo >&2 "Go files must be formatted with gofmt. Please run:"
- for fn in $unformatted; do
- echo >&2 " gofmt -w $PWD/$fn"
- done
-
- return 1
-}
-
-function runGoLint() {
- warningCount=$("$GOPATH"/bin/golint $(go list ./... | grep -v /vendor/ | grep -v /test/e2e-go/) | wc -l | tr -d ' ')
- if [ "${warningCount}" = "0" ]; then
- return 0
- fi
-
- echo >&2 "golint must be clean. Please run the following to list issues(${warningCount}):"
- echo >&2 " make lint"
-
- # run the linter again to output the actual issues
- "$GOPATH"/bin/golint $(go list ./... | grep -v /vendor/ | grep -v /test/e2e-go/) >&2
- return 1
-}
-
-echo "Running go vet..."
-make vet
-
-echo "Running gofmt..."
-runGoFmt
-
-echo "Running golint..."
-runGoLint
-
echo "Running check_license..."
./scripts/check_license.sh
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index fa8cfecdf..52d1ab38a 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -17,8 +17,9 @@
package pingpong
import (
+ "encoding/binary"
"fmt"
- "io/ioutil"
+ "log"
"math/rand"
"os"
"path/filepath"
@@ -34,30 +35,79 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
- "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
-func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]*pingPongAccount, cfg PpConfig, err error) {
- accounts = make(map[string]*pingPongAccount)
- cfg = initCfg
+func deterministicAccounts(initCfg PpConfig) <-chan *crypto.SignatureSecrets {
+ out := make(chan *crypto.SignatureSecrets)
+ if initCfg.GeneratedAccountSampleMethod == "" || initCfg.GeneratedAccountSampleMethod == "random" {
+ go randomDeterministicAccounts(initCfg, out)
+ } else if initCfg.GeneratedAccountSampleMethod == "sequential" {
+ go sequentialDeterministicAccounts(initCfg, out)
+ }
+ return out
+}
+func randomDeterministicAccounts(initCfg PpConfig, out chan *crypto.SignatureSecrets) {
+ numAccounts := initCfg.NumPartAccounts
+ totalAccounts := initCfg.GeneratedAccountsCount
+ if totalAccounts < numAccounts*4 {
+ // simpler rand strategy for smaller totalAccounts
+ order := rand.Perm(int(totalAccounts))[:numAccounts]
+ for _, acct := range order {
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ out <- crypto.GenerateSignatureSecrets(seed)
+ }
+ } else {
+ // randomly select numAccounts from generatedAccountsCount;
+ // better when generatedAccountsCount is much bigger than numAccounts
+ selected := make(map[uint32]bool, numAccounts)
+ for uint32(len(selected)) < numAccounts {
+ acct := uint32(rand.Int31n(int32(totalAccounts)))
+ if selected[acct] {
+ continue // already picked this account
+ }
+ // generate a deterministic secret key from the integer ID, using the
+ // same uint64 seed scheme as netdeploy/remote/deployedNetwork.go
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ out <- crypto.GenerateSignatureSecrets(seed)
+ selected[acct] = true
+ }
+ }
+ close(out)
+}
+
+func sequentialDeterministicAccounts(initCfg PpConfig, out chan *crypto.SignatureSecrets) {
+ for i := uint32(0); i < initCfg.NumPartAccounts; i++ {
+ acct := uint64(i) + uint64(initCfg.GeneratedAccountsOffset)
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ out <- crypto.GenerateSignatureSecrets(seed)
+ }
+}
+
+// load accounts from ${ALGORAND_DATA}/${netname}-${version}/*.rootkey
+func fileAccounts(ac *libgoal.Client) (out <-chan *crypto.SignatureSecrets, err error) {
genID, err2 := ac.GenesisID()
if err2 != nil {
err = err2
return
}
genesisDir := filepath.Join(ac.DataDir(), genID)
- files, err2 := ioutil.ReadDir(genesisDir)
+ files, err2 := os.ReadDir(genesisDir)
if err2 != nil {
err = err2
return
}
- var srcAcctPresent bool
- var richestAccount string
- var richestBalance uint64
+ ch := make(chan *crypto.SignatureSecrets)
+ go enumerateFileAccounts(files, genesisDir, ch)
+ return ch, nil
+}
+func enumerateFileAccounts(files []os.DirEntry, genesisDir string, out chan<- *crypto.SignatureSecrets) {
for _, info := range files {
var handle db.Accessor
@@ -67,7 +117,7 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc
}
// Fetch a handle to this database
- handle, err = db.MakeErasableAccessor(filepath.Join(genesisDir, info.Name()))
+ handle, err := db.MakeErasableAccessor(filepath.Join(genesisDir, info.Name()))
if err != nil {
// Couldn't open it, skip it
continue
@@ -81,339 +131,304 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc
continue
}
- publicKey := root.Secrets().SignatureVerifier
- accountAddress := basics.Address(publicKey)
+ out <- root.Secrets()
+ }
+ close(out)
+}
- if accountAddress.String() == cfg.SrcAccount {
- srcAcctPresent = true
- }
+func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
+ if pps.accounts == nil {
+ pps.accounts = make(map[string]*pingPongAccount)
+ }
- amt, err := ac.GetBalance(accountAddress.String())
- if err != nil {
- return nil, PpConfig{}, err
- }
+ if pps.cinfo.OptIns == nil {
+ pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp)
+ }
+ if pps.cinfo.AssetParams == nil {
+ pps.cinfo.AssetParams = make(map[uint64]v1.AssetParams, pps.cfg.NumAsset)
+ }
+ if pps.cinfo.AppParams == nil {
+ pps.cinfo.AppParams = make(map[uint64]v1.AppParams, pps.cfg.NumApp)
+ }
- if !srcAcctPresent && amt > richestBalance {
- richestAccount = accountAddress.String()
- richestBalance = amt
- }
+ sources := make([]<-chan *crypto.SignatureSecrets, 0, 2)
+ // read file accounts for local big source money
+ var fileSource <-chan *crypto.SignatureSecrets
+ fileSource, err = fileAccounts(ac)
+ if err != nil {
+ return
+ }
+ sources = append(sources, fileSource)
+ if pps.cfg.DeterministicKeys {
+ // add deterministic key accounts for re-use across runs
+ detSource := deterministicAccounts(pps.cfg)
+ sources = append(sources, detSource)
+ }
- if !initCfg.Quiet {
- fmt.Printf("Found local account: %s -> %v\n", accountAddress.String(), amt)
- }
+ var srcAcctPresent bool
+ var richestAccount string
+ var richestBalance uint64
+
+ for _, source := range sources {
+ for secret := range source {
+ publicKey := secret.SignatureVerifier
+ accountAddress := basics.Address(publicKey)
+ addr := accountAddress.String()
+
+ if addr == pps.cfg.SrcAccount {
+ srcAcctPresent = true
+ }
+
+ // TODO: switch to v2 API
+ //ai, err := ac.AccountInformationV2(addr, false)
+ ai, err := ac.AccountInformation(addr)
+ if err != nil {
+ return err
+ }
+ amt := ai.Amount
+
+ if !srcAcctPresent && amt > richestBalance {
+ richestAccount = addr
+ richestBalance = amt
+ }
- accounts[accountAddress.String()] = &pingPongAccount{
- balance: amt,
- sk: root.Secrets(),
- pk: accountAddress,
+ ppa := &pingPongAccount{
+ balance: amt,
+ sk: secret,
+ pk: accountAddress,
+ }
+
+ pps.integrateAccountInfo(addr, ppa, ai)
+
+ if !pps.cfg.Quiet {
+ fmt.Printf("Found local account: %s\n", ppa.String())
+ }
+
+ pps.accounts[addr] = ppa
}
}
if !srcAcctPresent {
- if cfg.SrcAccount != "" {
- err = fmt.Errorf("specified Source Account '%s' not found", cfg.SrcAccount)
+ if pps.cfg.SrcAccount != "" {
+ err = fmt.Errorf("specified Source Account '%s' not found", pps.cfg.SrcAccount)
return
}
- if richestBalance >= cfg.MinAccountFunds {
- cfg.SrcAccount = richestAccount
+ if richestBalance >= pps.cfg.MinAccountFunds {
+ pps.cfg.SrcAccount = richestAccount
fmt.Printf("Identified richest account to use for Source Account: %s -> %v\n", richestAccount, richestBalance)
} else {
- err = fmt.Errorf("no accounts found with sufficient stake (> %d)", cfg.MinAccountFunds)
+ err = fmt.Errorf("no accounts found with sufficient stake (> %d)", pps.cfg.MinAccountFunds)
return
}
} else {
- fmt.Printf("Located Source Account: %s -> %v\n", cfg.SrcAccount, accounts[cfg.SrcAccount])
+ fmt.Printf("Located Source Account: %s -> %v\n", pps.cfg.SrcAccount, pps.accounts[pps.cfg.SrcAccount])
}
return
}
-// Prepare assets for asset transaction testing
-// Step 1) Create X assets for each of the participant accounts
-// Step 2) For each participant account, opt-in to assets of all other participant accounts
-// Step 3) Evenly distribute the assets across all participant accounts
-func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, client libgoal.Client) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) {
- proto, err := getProto(client)
- if err != nil {
- return
+func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, ai v1.Account) {
+ ppa.balance = ai.Amount
+ // assets this account has created
+ for assetID, ap := range ai.AssetParams {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ pps.cinfo.AssetParams[assetID] = ap
}
+ // assets held
+ for assetID, holding := range ai.Assets {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ if ppa.holdings == nil {
+ ppa.holdings = make(map[uint64]uint64)
+ }
+ ppa.holdings[assetID] = holding.Amount
+ }
+ // apps created by this account
+ for appID, ap := range ai.AppParams {
+ pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ pps.cinfo.AppParams[appID] = ap
+ }
+ // apps opted into
+ for appID := range ai.AppLocalStates {
+ pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ }
+}
- resultAssetMaps = make(map[uint64]v1.AssetParams)
+type assetopti struct {
+ assetID uint64
+ params v1.AssetParams // TODO: switch to v2 API
+ optins []string // addr strings
+}
- // optIns contains own and explicitly opted-in assets
- optIns = make(map[uint64][]string)
- numCreatedAssetsByAddr := make(map[string]int, len(accounts))
+type assetSet []assetopti
- nextSendTime := time.Now()
+// Len is part of sort.Interface
+func (as *assetSet) Len() int {
+ return len(*as)
+}
- // 1) Create X assets for each of the participant accounts
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
- }
- addrAccount, addrErr := client.AccountInformation(addr)
- if addrErr != nil {
- fmt.Printf("Cannot lookup source account %v\n", addr)
- err = addrErr
- return
- }
+// Less is part of sort.Interface
+// This is a reversed sort, higher values first
+func (as *assetSet) Less(a, b int) bool {
+ return len((*as)[a].optins) > len((*as)[b].optins)
+}
- toCreate := int(pps.cfg.NumAsset) - len(addrAccount.AssetParams)
- numCreatedAssetsByAddr[addr] = toCreate
+// Swap is part of sort.Interface
+func (as *assetSet) Swap(a, b int) {
+ t := (*as)[a]
+ (*as)[a] = (*as)[b]
+ (*as)[b] = t
+}
- fmt.Printf("Creating %v create asset transaction for account %v \n", toCreate, addr)
- fmt.Printf("cfg.NumAsset %v, addrAccount.AssetParams %v\n", pps.cfg.NumAsset, addrAccount.AssetParams)
+func (pps *WorkerState) prepareAssets(client *libgoal.Client) (err error) {
+ if pps.cinfo.AssetParams == nil {
+ pps.cinfo.AssetParams = make(map[uint64]v1.AssetParams)
+ }
+ if pps.cinfo.OptIns == nil {
+ pps.cinfo.OptIns = make(map[uint64][]string)
+ }
- totalSupply := pps.cfg.MinAccountAsset * uint64(pps.cfg.NumPartAccounts) * 9 * uint64(pps.cfg.GroupSize) * uint64(pps.cfg.RefreshTime.Seconds()) / pps.cfg.TxnPerSec
+ // create new assets as needed
+ err = pps.makeNewAssets(client)
+ if err != nil {
+ return
+ }
- // create assets in participant account
- for i := 0; i < toCreate; i++ {
- var metaLen = 32
- meta := make([]byte, metaLen)
- crypto.RandBytes(meta[:])
+ // find the most-opted-in assets to work with
+ assets := make([]assetopti, len(pps.cinfo.AssetParams))
+ pos := 0
+ for assetID, params := range pps.cinfo.AssetParams {
+ assets[pos].assetID = assetID
+ assets[pos].params = params
+ assets[pos].optins = pps.cinfo.OptIns[assetID]
+ pos++
+ }
+ ta := assetSet(assets)
+ sort.Sort(&ta)
+ if len(assets) > int(pps.cfg.NumAsset) {
+ assets = assets[:pps.cfg.NumAsset]
+ nap := make(map[uint64]v1.AssetParams, pps.cfg.NumAsset)
+ for _, asset := range assets {
+ nap[asset.assetID] = asset.params
+ }
+ pps.cinfo.AssetParams = nap
+ }
+
+ // opt-in more accounts as needed
+ for assetID := range pps.cinfo.AssetParams {
+ for addr, acct := range pps.accounts {
+ _, has := acct.holdings[assetID]
+ if !has {
+ tx, sendErr := client.MakeUnsignedAssetSendTx(assetID, 0, addr, "", "")
+ if sendErr != nil {
+ fmt.Printf("Cannot initiate asset optin %v in account %v\n", assetID, addr)
+ err = sendErr
+ continue
+ }
- if totalSupply < pps.cfg.MinAccountAsset { // overflow
- fmt.Printf("Too many NumPartAccounts\n")
- return
- }
- assetName := fmt.Sprintf("pong%d", i)
- if !pps.cfg.Quiet {
- fmt.Printf("Creating asset %s\n", assetName)
- }
- tx, createErr := client.MakeUnsignedAssetCreateTx(totalSupply, false, addr, addr, addr, addr, "ping", assetName, "", meta, 0)
- if createErr != nil {
- fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
- err = createErr
- return
- }
- tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill asset creation txn\n")
- return
- }
- tx.Note = pps.makeNextUniqueNoteField()
- schedule(pps.cfg.TxnPerSec, &nextSendTime)
- _, err = signAndBroadcastTransaction(accounts[addr], tx, client)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
- return
- }
- }
- }
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill asset optin %v in account %v\n", assetID, addr)
+ continue
+ }
+ tx.Note = pps.makeNextUniqueNoteField()
- // wait until all the assets created
- allAssets := make(map[uint64]string, int(pps.cfg.NumAsset)*len(accounts))
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
- }
- var account v1.Account
- deadline := time.Now().Add(3 * time.Minute)
- for {
- account, err = client.AccountInformation(addr)
- if err != nil {
- fmt.Printf("Warning: cannot lookup source account after assets creation")
- time.Sleep(1 * time.Second)
- continue
- }
- if len(account.AssetParams) >= numCreatedAssetsByAddr[addr] {
- break
- }
- if time.Now().After(deadline) {
- err = fmt.Errorf("asset creation took too long")
- fmt.Printf("Error: %s\n", err.Error())
- return
+ pps.schedule(1)
+ _, err = signAndBroadcastTransaction(acct, tx, client)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err)
+ continue
+ }
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
}
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- }
- assetParams := account.AssetParams
- if !pps.cfg.Quiet {
- fmt.Printf("Configured %d assets %+v\n", len(assetParams), assetParams)
- }
- // add own asset to opt-ins since asset creators are auto-opted in
- for k := range account.AssetParams {
- optIns[k] = append(optIns[k], addr)
- allAssets[k] = addr
}
}
- // optInsByAddr tracks only explicitly opted-in assetsA
- optInsByAddr := make(map[string]map[uint64]bool)
+ // We could distribute asset value here, but distribution waits until constructAssetTxn()
+ return
+}
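
The selection step above keeps the NumAsset most-opted-in assets, sorting by opt-in count. A minimal standalone sketch of the same top-N idea, using a hypothetical assetInfo type in place of the file's assetopti/assetSet helpers:

    package main

    import (
        "fmt"
        "sort"
    )

    // assetInfo is a hypothetical stand-in for this file's assetopti type.
    type assetInfo struct {
        assetID uint64
        optins  []string
    }

    // keepMostOptedIn keeps the n assets with the most opt-ins.
    func keepMostOptedIn(assets []assetInfo, n int) []assetInfo {
        sort.Slice(assets, func(i, j int) bool {
            return len(assets[i].optins) > len(assets[j].optins)
        })
        if len(assets) > n {
            assets = assets[:n]
        }
        return assets
    }

    func main() {
        assets := []assetInfo{
            {1, []string{"a"}},
            {2, []string{"a", "b", "c"}},
            {3, []string{"a", "b"}},
        }
        fmt.Println(keepMostOptedIn(assets, 2)) // asset 2, then asset 3
    }
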
- // 2) For each participant account, opt-in up to proto.MaxAssetsPerAccount assets of all other participant accounts
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
+const totalSupply = 10_000_000_000_000_000
+
+func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) {
+ if len(pps.cinfo.AssetParams) >= int(pps.cfg.NumAsset) {
+ return
+ }
+ assetsNeeded := int(pps.cfg.NumAsset) - len(pps.cinfo.AssetParams)
+ newAssetAddrs := make(map[string]*pingPongAccount, assetsNeeded)
+ for addr, acct := range pps.accounts {
+ if assetsNeeded <= 0 {
+ break
}
+ assetsNeeded--
+ var meta [32]byte
+ crypto.RandBytes(meta[:])
+ assetName := fmt.Sprintf("pong%d_%d", len(pps.cinfo.AssetParams), rand.Intn(8999)+1000)
if !pps.cfg.Quiet {
- fmt.Printf("Opting to account %v\n", addr)
+ fmt.Printf("Creating asset %s\n", assetName)
}
-
- acct, addrErr := client.AccountInformation(addr)
- if addrErr != nil {
- fmt.Printf("Cannot lookup optin account\n")
- err = addrErr
+ tx, createErr := client.MakeUnsignedAssetCreateTx(totalSupply, false, addr, addr, addr, addr, "ping", assetName, "", meta[:], 0)
+ if createErr != nil {
+ fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
+ err = createErr
return
}
- maxAssetsPerAccount := proto.MaxAssetsPerAccount
- // TODO : given that we've added unlimited asset support, we should revise this
- // code so that we'll have control on how many asset/account we want to create.
- // for now, I'm going to keep the previous max values until we have refactored this code.
- if maxAssetsPerAccount == 0 {
- maxAssetsPerAccount = config.Consensus[protocol.ConsensusV30].MaxAssetsPerAccount
- }
- numSlots := maxAssetsPerAccount - len(acct.Assets)
- optInsByAddr[addr] = make(map[uint64]bool)
- for k, creator := range allAssets {
- if creator == addr {
- continue
- }
- // do we have any more asset slots for this?
- if numSlots <= 0 {
- break
- }
- numSlots--
-
- // opt-in asset k for addr
- tx, sendErr := client.MakeUnsignedAssetSendTx(k, 0, addr, "", "")
- if sendErr != nil {
- fmt.Printf("Cannot initiate asset optin %v in account %v\n", k, addr)
- err = sendErr
- return
- }
-
- tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill asset optin %v in account %v\n", k, addr)
- return
- }
- tx.Note = pps.makeNextUniqueNoteField()
-
- schedule(pps.cfg.TxnPerSec, &nextSendTime)
- _, err = signAndBroadcastTransaction(accounts[addr], tx, client)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err)
- return
- }
- optIns[k] = append(optIns[k], addr)
- optInsByAddr[addr][k] = true
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill asset creation txn\n")
+ return
}
- }
-
- // wait until all opt-ins completed
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
+ tx.Note = pps.makeNextUniqueNoteField()
+ pps.schedule(1)
+ _, err = signAndBroadcastTransaction(pps.accounts[addr], tx, client)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
+ return
}
- expectedAssets := numCreatedAssetsByAddr[addr] + len(optInsByAddr[addr])
- var account v1.Account
- deadline := time.Now().Add(3 * time.Minute)
- for {
- account, err = client.AccountInformation(addr)
+ newAssetAddrs[addr] = acct
+ }
+ // wait for new assets to be created, fetch account data for them
+ newAssets := make(map[uint64]v1.AssetParams, assetsNeeded)
+ timeout := time.Now().Add(10 * time.Second)
+ for len(newAssets) < assetsNeeded {
+ for addr, acct := range newAssetAddrs {
+ // TODO: switch to v2 API
+ ai, err := client.AccountInformation(addr)
if err != nil {
- fmt.Printf("Warning: cannot lookup source account after assets opt in")
+ fmt.Printf("Warning: cannot lookup source account after assets creation")
time.Sleep(1 * time.Second)
continue
}
- if len(account.Assets) == expectedAssets {
- break
- } else if len(account.Assets) > expectedAssets {
- err = fmt.Errorf("account %v has too many assets %d > %d ", addr, len(account.Assets), expectedAssets)
- return
- }
-
- if time.Now().After(deadline) {
- err = fmt.Errorf("asset opting in took too long")
- fmt.Printf("Error: %s\n", err.Error())
- return
- }
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- }
- }
-
- // Step 3) Evenly distribute the assets across all opted-in accounts
- for k, creator := range allAssets {
- if !pps.cfg.Quiet {
- fmt.Printf("Distributing asset %+v from account %v\n", k, creator)
- }
- creatorAccount, creatorErr := client.AccountInformation(creator)
- if creatorErr != nil {
- fmt.Printf("Cannot lookup source account\n")
- err = creatorErr
- return
- }
- assetParams := creatorAccount.AssetParams
-
- for _, addr := range optIns[k] {
- assetAmt := assetParams[k].Total / uint64(len(optIns[k]))
- if !pps.cfg.Quiet {
- fmt.Printf("Distributing assets from %v to %v \n", creator, addr)
- }
-
- tx, sendErr := client.MakeUnsignedAssetSendTx(k, assetAmt, addr, "", "")
- if sendErr != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", sendErr)
- err = fmt.Errorf("error making unsigned asset send tx : %w", sendErr)
- return
- }
- tx.Note = pps.makeNextUniqueNoteField()
- tx, sendErr = client.FillUnsignedTxTemplate(creator, 0, 0, pps.cfg.MaxFee, tx)
- if sendErr != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", sendErr)
- err = fmt.Errorf("error making unsigned asset send tx : %w", sendErr)
- return
- }
- tx.LastValid = tx.FirstValid + 5
- if pps.cfg.MaxFee == 0 {
- var suggestedFee uint64
- suggestedFee, err = client.SuggestedFee()
- if err != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error retrieving suggestedFee: %v\n", err)
- return
- }
- if suggestedFee > tx.Fee.Raw {
- tx.Fee.Raw = suggestedFee
+ for assetID, ap := range ai.AssetParams {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ _, has := pps.cinfo.AssetParams[assetID]
+ if !has {
+ newAssets[assetID] = ap
}
}
-
- schedule(pps.cfg.TxnPerSec, &nextSendTime)
- _, err = signAndBroadcastTransaction(accounts[creator], tx, client)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset distribution failed with error %v\n", err)
- return
+ for assetID, holding := range ai.Assets {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ if acct.holdings == nil {
+ acct.holdings = make(map[uint64]uint64)
+ }
+ acct.holdings[assetID] = holding.Amount
}
}
- // append the asset to the result assets
- resultAssetMaps[k] = assetParams[k]
- }
-
- // wait for all transfers acceptance
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- deadline := time.Now().Add(3 * time.Minute)
- var pending v1.PendingTransactions
- for {
- pending, err = client.GetPendingTransactions(100)
- if err != nil {
- fmt.Printf("Warning: cannot get pending txn")
- time.Sleep(1 * time.Second)
- continue
- }
- if pending.TotalTxns == 0 {
+ if time.Now().After(timeout) {
+ // complain, but try to keep running on what assets we have
+ log.Printf("WARNING took too long to create new assets")
+ // TODO: error?
break
}
- if time.Now().After(deadline) {
- fmt.Printf("Warning: assets distribution took too long")
- break
- }
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
}
- return
+ for assetID, ap := range newAssets {
+ pps.cinfo.AssetParams[assetID] = ap
+ }
+ return nil
}
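
makeNewAssets above polls AccountInformation until the expected assets appear, then gives up after a fixed timeout and keeps running with whatever was found. A generic sketch of that poll-with-deadline shape (the fetch closure is a placeholder, not code from this diff):

    package main

    import (
        "fmt"
        "time"
    )

    // pollUntil calls fetch until it reports success or the timeout elapses;
    // it returns false when it gave up, and the caller proceeds either way.
    func pollUntil(timeout, interval time.Duration, fetch func() bool) bool {
        deadline := time.Now().Add(timeout)
        for {
            if fetch() {
                return true
            }
            if time.Now().After(deadline) {
                return false
            }
            time.Sleep(interval)
        }
    }

    func main() {
        start := time.Now()
        ok := pollUntil(50*time.Millisecond, 10*time.Millisecond, func() bool {
            return time.Since(start) > 25*time.Millisecond // state shows up later
        })
        fmt.Println("created:", ok) // created: true
    }
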
-func signAndBroadcastTransaction(senderAccount *pingPongAccount, tx transactions.Transaction, client libgoal.Client) (txID string, err error) {
+func signAndBroadcastTransaction(senderAccount *pingPongAccount, tx transactions.Transaction, client *libgoal.Client) (txID string, err error) {
signedTx := tx.Sign(senderAccount.sk)
txID, err = client.BroadcastTransaction(signedTx)
if err != nil {
@@ -581,7 +596,7 @@ func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKe
return ops.Program, progAsm
}
-func waitForNextRoundOrSleep(client libgoal.Client, waitTime time.Duration) {
+func waitForNextRoundOrSleep(client *libgoal.Client, waitTime time.Duration) {
status, err := client.Status()
if err == nil {
status, err = client.WaitForRound(status.LastRound)
@@ -592,7 +607,7 @@ func waitForNextRoundOrSleep(client libgoal.Client, waitTime time.Duration) {
time.Sleep(waitTime)
}
-func (pps *WorkerState) sendAsGroup(txgroup []transactions.Transaction, client libgoal.Client, senders []string) (err error) {
+func (pps *WorkerState) sendAsGroup(txgroup []transactions.Transaction, client *libgoal.Client, senders []string) (err error) {
if len(txgroup) == 0 {
err = fmt.Errorf("sendAsGroup: empty group")
return
@@ -624,7 +639,7 @@ repeat:
var proto *config.ConsensusParams
-func getProto(client libgoal.Client) (config.ConsensusParams, error) {
+func getProto(client *libgoal.Client) (config.ConsensusParams, error) {
if proto == nil {
var err error
status, err := client.Status()
@@ -641,207 +656,136 @@ func getProto(client libgoal.Client) (config.ConsensusParams, error) {
return *proto, nil
}
-func (pps *WorkerState) prepareApps(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) (appParams map[uint64]v1.AppParams, optIns map[uint64][]string, err error) {
- proto, err := getProto(client)
- if err != nil {
- return
+// ensure that cfg.NumPartAccounts have cfg.NumAppOptIn opted in selecting from cfg.NumApp
+func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) {
+ if pps.cinfo.AppParams == nil {
+ pps.cinfo.AppParams = make(map[uint64]v1.AppParams)
}
- toCreate := int(cfg.NumApp)
- appsPerAcct := proto.MaxAppsCreated
- // TODO : given that we've added unlimited app support, we should revise this
- // code so that we'll have control on how many app/account we want to create.
- // for now, I'm going to keep the previous max values until we have refactored this code.
- if appsPerAcct == 0 {
- appsPerAcct = config.Consensus[protocol.ConsensusV30].MaxAppsCreated
+ if pps.cinfo.OptIns == nil {
+ pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp)
}
- // create min(groupSize, maxAppsPerAcct) per account to optimize sending in batches
- groupSize := proto.MaxTxGroupSize
- if appsPerAcct > groupSize {
- appsPerAcct = groupSize
- }
-
- acctNeeded := toCreate / appsPerAcct
- if toCreate%appsPerAcct != 0 {
- acctNeeded++
- }
- if acctNeeded >= len(accounts) { // >= because cfg.SrcAccount is skipped
- err = fmt.Errorf("need %d accts to create %d apps but got only %d accts", acctNeeded, toCreate, len(accounts))
- return
- }
- maxOptIn := uint32(config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsOptedIn)
- if maxOptIn > 0 && cfg.NumAppOptIn > maxOptIn {
- err = fmt.Errorf("each acct can only opt in to %d but %d requested", maxOptIn, cfg.NumAppOptIn)
- return
- }
-
- appAccounts := make([]v1.Account, len(accounts))
- accountsCount := 0
- for acctAddr := range accounts {
- if acctAddr == cfg.SrcAccount {
- continue
+ // generate new apps
+ var txgroup []transactions.Transaction
+ var senders []string
+ for addr, acct := range pps.accounts {
+ if len(pps.cinfo.AppParams) >= int(pps.cfg.NumApp) {
+ break
}
- appAccounts[accountsCount], err = client.AccountInformation(acctAddr)
+ var tx transactions.Transaction
+ tx, err = pps.newApp(addr, client)
if err != nil {
- fmt.Printf("Warning, cannot lookup acctAddr account %s", acctAddr)
return
}
- accountsCount++
- if accountsCount == acctNeeded {
- break
+ acct.addBalance(-int64(pps.cfg.MaxFee))
+ txgroup = append(txgroup, tx)
+ senders = append(senders, addr)
+ if len(txgroup) == int(pps.cfg.GroupSize) {
+ pps.schedule(len(txgroup))
+ err = pps.sendAsGroup(txgroup, client, senders)
+ if err != nil {
+ return
+ }
+ txgroup = txgroup[:0]
+ senders = senders[:0]
}
}
- appAccounts = appAccounts[:accountsCount]
-
- if !cfg.Quiet {
- fmt.Printf("Selected temp account:\n")
- for _, acct := range appAccounts {
- fmt.Printf("%s\n", acct.Address)
+ if len(txgroup) > 0 {
+ pps.schedule(len(txgroup))
+ err = pps.sendAsGroup(txgroup, client, senders)
+ if err != nil {
+ return
}
+ txgroup = txgroup[:0]
+ senders = senders[:0]
}
- // generate app program with roughly some number of operations
- prog, asm := genAppProgram(cfg.AppProgOps, cfg.AppProgHashes, cfg.AppProgHashSize, cfg.AppGlobKeys, cfg.AppLocalKeys)
- if !cfg.Quiet {
- fmt.Printf("generated program: \n%s\n", asm)
- }
- globSchema := basics.StateSchema{NumByteSlice: proto.MaxGlobalSchemaEntries}
- locSchema := basics.StateSchema{NumByteSlice: proto.MaxLocalSchemaEntries}
-
- // for each account, store the number of expected applications.
- accountsApplicationCount := make(map[string]int)
-
- // create apps
- for idx, appAccount := range appAccounts {
- begin := idx * appsPerAcct
- end := (idx + 1) * appsPerAcct
- if end > toCreate {
- end = toCreate
- }
-
- var txgroup []transactions.Transaction
- var senders []string
- for i := begin; i < end; i++ {
+ // opt-in more accounts to apps
+ acctPerApp := (pps.cfg.NumAppOptIn * pps.cfg.NumPartAccounts) / pps.cfg.NumApp
+ for appid := range pps.cinfo.AppParams {
+ optins := pps.cinfo.OptIns[appid]
+ for addr, acct := range pps.accounts {
+ if len(optins) >= int(acctPerApp) {
+ break
+ }
+ // opt-in the account to the app
var tx transactions.Transaction
-
- tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, 0)
+ tx, err = pps.appOptIn(addr, appid, client)
if err != nil {
- fmt.Printf("Cannot create app txn\n")
- panic(err)
- // TODO : if we fail here for too long, we should re-create new accounts, etc.
+ return
}
-
- tx, err = client.FillUnsignedTxTemplate(appAccount.Address, 0, 0, cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill app creation txn\n")
- panic(err)
- // TODO : if we fail here for too long, we should re-create new accounts, etc.
+ acct.addBalance(-int64(pps.cfg.MaxFee))
+ txgroup = append(txgroup, tx)
+ senders = append(senders, addr)
+ if len(txgroup) == int(pps.cfg.GroupSize) {
+ pps.schedule(len(txgroup))
+ err = pps.sendAsGroup(txgroup, client, senders)
+ if err != nil {
+ return
+ }
+ txgroup = txgroup[:0]
+ senders = senders[:0]
}
- // Ensure different txids
- tx.Note = pps.makeNextUniqueNoteField()
-
- txgroup = append(txgroup, tx)
- accounts[appAccount.Address].addBalance(-int64(tx.Fee.Raw))
- senders = append(senders, appAccount.Address)
- accountsApplicationCount[appAccount.Address]++
}
-
+ }
+ if len(txgroup) > 0 {
+ pps.schedule(len(txgroup))
err = pps.sendAsGroup(txgroup, client, senders)
if err != nil {
- balance, err2 := client.GetBalance(appAccount.Address)
- if err2 == nil {
- fmt.Printf("account %v balance is %d, logged balance is %d\n", appAccount.Address, balance, accounts[appAccount.Address].getBalance())
- } else {
- fmt.Printf("account %v balance cannot be determined : %v\n", appAccount.Address, err2)
- }
return
}
- if !cfg.Quiet {
- fmt.Printf("Created new %d apps\n", len(txgroup))
- }
+ //txgroup = txgroup[:0]
+ //senders = senders[:0]
}
+ return
+}
- // get these apps
- var aidxs []uint64
- appParams = make(map[uint64]v1.AppParams)
- for _, appAccount := range appAccounts {
- var account v1.Account
- for {
- account, err = client.AccountInformation(appAccount.Address)
- if err != nil {
- fmt.Printf("Warning, cannot lookup source account")
- return
- }
- if len(account.AppParams) >= accountsApplicationCount[appAccount.Address] {
- break
- }
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- // TODO : if we fail here for too long, we should re-create new accounts, etc.
- }
- for idx, v := range account.AppParams {
- appParams[idx] = v
- aidxs = append(aidxs, idx)
- }
+func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transactions.Transaction, err error) {
+ // generate app program with roughly some number of operations
+ prog, asm := genAppProgram(pps.cfg.AppProgOps, pps.cfg.AppProgHashes, pps.cfg.AppProgHashSize, pps.cfg.AppGlobKeys, pps.cfg.AppLocalKeys)
+ if !pps.cfg.Quiet {
+ fmt.Printf("generated program: \n%s\n", asm)
}
- if len(aidxs) != len(appParams) {
- err = fmt.Errorf("duplicates in aidxs, %d != %d", len(aidxs), len(appParams))
- return
+ globSchema := basics.StateSchema{NumByteSlice: proto.MaxGlobalSchemaEntries}
+ locSchema := basics.StateSchema{NumByteSlice: proto.MaxLocalSchemaEntries}
+
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, 0)
+ if err != nil {
+ fmt.Printf("Cannot create app txn\n")
+ panic(err)
+ // TODO : if we fail here for too long, we should re-create new accounts, etc.
}
- // time to opt in to these apps
- if cfg.NumAppOptIn > 0 {
- optIns = make(map[uint64][]string)
- for addr := range accounts {
- if addr == cfg.SrcAccount {
- continue
- }
- var txgroup []transactions.Transaction
- var senders []string
- permAppIndices := rand.Perm(len(aidxs))
- for i := uint32(0); i < cfg.NumAppOptIn; i++ {
- j := permAppIndices[i]
- aidx := aidxs[j]
- var tx transactions.Transaction
- tx, err = client.MakeUnsignedAppOptInTx(aidx, nil, nil, nil, nil)
- if err != nil {
- fmt.Printf("Cannot create app txn\n")
- panic(err)
- }
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill app creation txn\n")
+ panic(err)
+ // TODO : if we fail here for too long, we should re-create new accounts, etc.
+ }
- tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill app creation txn\n")
- panic(err)
- }
+ // Ensure different txids
+ tx.Note = pps.makeNextUniqueNoteField()
- // Ensure different txids
- tx.Note = pps.makeNextUniqueNoteField()
+ return tx, err
+}
- optIns[aidx] = append(optIns[aidx], addr)
-
- txgroup = append(txgroup, tx)
- senders = append(senders, addr)
- if len(txgroup) == groupSize {
- err = pps.sendAsGroup(txgroup, client, senders)
- if err != nil {
- return
- }
- txgroup = txgroup[:0]
- senders = senders[:0]
- }
- }
- // broadcast leftovers
- if len(txgroup) > 0 {
- err = pps.sendAsGroup(txgroup, client, senders)
- if err != nil {
- return
- }
- }
- }
+func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Client) (tx transactions.Transaction, err error) {
+ tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil)
+ if err != nil {
+ fmt.Printf("Cannot create app txn\n")
+ panic(err)
}
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill app creation txn\n")
+ panic(err)
+ }
+
+ // Ensure different txids
+ tx.Note = pps.makeNextUniqueNoteField()
return
}
@@ -872,17 +816,28 @@ func takeTopAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32
return
}
-func generateAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32) {
+// generate random ephemeral accounts
+// TODO: don't do this and _always_ use the deterministic account mechanism?
+func (pps *WorkerState) generateAccounts() {
var seed crypto.Seed
- for accountsRequired := int(numAccounts+1) - len(allAccounts); accountsRequired > 0; accountsRequired-- {
+ for accountsRequired := int(pps.cfg.NumPartAccounts+1) - len(pps.accounts); accountsRequired > 0; accountsRequired-- {
crypto.RandBytes(seed[:])
privateKey := crypto.GenerateSignatureSecrets(seed)
publicKey := basics.Address(privateKey.SignatureVerifier)
- allAccounts[publicKey.String()] = &pingPongAccount{
+ pps.accounts[publicKey.String()] = &pingPongAccount{
sk: privateKey,
pk: publicKey,
}
}
}
+
+func uniqueAppend(they []string, x string) []string {
+ for _, v := range they {
+ if v == x {
+ return they
+ }
+ }
+ return append(they, x)
+}
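
uniqueAppend gives the opt-in slices set semantics with a linear scan, which is fine at these list sizes. A quick usage sketch (the function body is copied verbatim from above):

    package main

    import "fmt"

    func uniqueAppend(they []string, x string) []string {
        for _, v := range they {
            if v == x {
                return they
            }
        }
        return append(they, x)
    }

    func main() {
        optins := []string{"addr1"}
        optins = uniqueAppend(optins, "addr2")
        optins = uniqueAppend(optins, "addr1") // duplicate, ignored
        fmt.Println(optins)                    // [addr1 addr2]
    }
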
diff --git a/shared/pingpong/accounts_test.go b/shared/pingpong/accounts_test.go
new file mode 100644
index 000000000..7f2f0a737
--- /dev/null
+++ b/shared/pingpong/accounts_test.go
@@ -0,0 +1,60 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package pingpong
+
+import (
+ "encoding/binary"
+ "testing"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/stretchr/testify/assert"
+)
+
+func makeKeyFromSeed(i uint64) *crypto.SignatureSecrets {
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], i)
+ s := crypto.GenerateSignatureSecrets(seed)
+ return s
+}
+
+func TestDeterministicAccounts(t *testing.T) {
+ initCfg := PpConfig{
+ NumPartAccounts: 20,
+ DeterministicKeys: true,
+ GeneratedAccountsCount: 100,
+ }
+
+ // create the expected set of keys the same way netgoal generate --deterministic does
+ expectedPubKeys := make(map[crypto.PublicKey]*crypto.SignatureSecrets)
+ for i := 0; i < int(initCfg.GeneratedAccountsCount); i++ {
+ key := makeKeyFromSeed(uint64(i))
+ expectedPubKeys[key.SignatureVerifier] = key
+ }
+ assert.Len(t, expectedPubKeys, int(initCfg.GeneratedAccountsCount))
+
+ // call pingpong acct generator and assert its separately-generated secrets are equal
+ accountSecrets := deterministicAccounts(initCfg)
+ cnt := 0
+ for secret := range accountSecrets {
+ t.Log("Got address", basics.Address(secret.SignatureVerifier))
+ assert.Contains(t, expectedPubKeys, secret.SignatureVerifier)
+ assert.Equal(t, *expectedPubKeys[secret.SignatureVerifier], *secret)
+ cnt++
+ }
+ assert.Equal(t, int(initCfg.NumPartAccounts), cnt)
+}
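
The test hinges on the keys being a pure function of the account index: the index is written little-endian into the seed, so any tool can re-derive the same accounts independently. A self-contained sketch of the idea using only the standard library, with ed25519.NewKeyFromSeed standing in for crypto.GenerateSignatureSecrets:

    package main

    import (
        "bytes"
        "crypto/ed25519"
        "encoding/binary"
        "fmt"
    )

    // deriveKey mirrors makeKeyFromSeed: the account index is the seed.
    func deriveKey(i uint64) ed25519.PrivateKey {
        var seed [ed25519.SeedSize]byte
        binary.LittleEndian.PutUint64(seed[:], i)
        return ed25519.NewKeyFromSeed(seed[:])
    }

    func main() {
        // the same index yields the same key on any machine, any run
        fmt.Println(bytes.Equal(deriveKey(7), deriveKey(7))) // true
    }
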
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index db6cbb4ed..5b9224c01 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -18,6 +18,7 @@ package pingpong
import (
"encoding/json"
+ "fmt"
"io"
"os"
"time"
@@ -30,6 +31,7 @@ const ConfigFilename = "ppconfig.json"
// PpConfig defines the configuration structure for pingpong
type PpConfig struct {
+ // SrcAccount is the address to use as the funding source for new accounts
SrcAccount string
RandomizeFee bool
RandomizeAmt bool
@@ -45,12 +47,19 @@ type PpConfig struct {
Quiet bool
RandomNote bool
RandomLease bool
- Program []byte
- LogicArgs [][]byte
- GroupSize uint32
- NumAsset uint32
+
+ Program []byte
+ LogicArgs [][]byte
+ ProgramProbability float64
+
+ GroupSize uint32
+ // NumAsset is the number of assets each account holds
+ NumAsset uint32
+ // MinAccountAsset is the minimum asset amount per participating account
MinAccountAsset uint64
- NumApp uint32
+ // NumApp is the total number of apps to create
+ NumApp uint32
+ // NumAppOptIn is the number of apps each account opts in to
NumAppOptIn uint32
AppProgOps uint32
AppProgHashes uint32
@@ -64,6 +73,18 @@ type PpConfig struct {
NftAsaPerSecond uint32 // e.g. 100
NftAsaPerAccount uint32 // 0..999
NftAsaAccountInFlight uint32
+
+ // configuration related to using bootstrapped ledgers built by netgoal
+ // TODO: support generatedAssetsCount, generatedApplicationCount
+ DeterministicKeys bool
+ GeneratedAccountsCount uint32
+ GeneratedAccountSampleMethod string
+ GeneratedAccountsOffset uint32
+
+ WeightPayment float64
+ WeightAsset float64
+ WeightApp float64
+ WeightNFTCreation float64
}
// DefaultConfig object for Ping Pong
@@ -78,7 +99,7 @@ var DefaultConfig = PpConfig{
TxnPerSec: 200,
NumPartAccounts: 10,
RunTime: 10 * time.Second,
- RefreshTime: 10 * time.Second,
+ RefreshTime: 3600 * time.Second,
MinAccountFunds: 100000,
GroupSize: 1,
NumAsset: 0,
@@ -90,6 +111,8 @@ var DefaultConfig = PpConfig{
Rekey: false,
MaxRuntime: 0,
+ ProgramProbability: 1,
+
NftAsaAccountInFlight: 5,
NftAsaPerAccount: 900,
}
@@ -125,3 +148,50 @@ func (cfg PpConfig) Dump(stream io.Writer) {
enc := codecs.NewFormattedJSONEncoder(stream)
enc.Encode(cfg)
}
+
+// SetDefaultWeights ensures a reasonable configuration of traffic generation weights.
+// With no weights set, and old args about what mode to run, each activated traffic type gets a weight of 1.
+// With no weights set and some activated traffic type other than payment, payment gets deactivated (zero weight) to maintain
+// compatibility with prior behavior. WeightPayment must be explicitly set to add it to the mix if other modes are activated.
+func (cfg *PpConfig) SetDefaultWeights() {
+ const epsilon = 0.0000001
+ if cfg.WeightPayment+cfg.WeightAsset+cfg.WeightApp+cfg.WeightNFTCreation < epsilon {
+ // set up some sensible run probability weights
+ if cfg.NumAsset > 0 && cfg.WeightAsset < epsilon {
+ cfg.WeightAsset = 1
+ }
+ if cfg.NumApp > 0 && cfg.WeightApp < epsilon {
+ cfg.WeightApp = 1
+ }
+ if cfg.NftAsaPerSecond > 0 && cfg.WeightNFTCreation < epsilon {
+ cfg.WeightNFTCreation = 1
+ }
+ if cfg.NumAsset == 0 && cfg.NumApp == 0 && cfg.NftAsaPerSecond == 0 && cfg.WeightPayment < epsilon {
+ // backwards compatibility: when a mode is specified, payment traffic was not run, so only enable payments when no mode is specified
+ cfg.WeightPayment = 1
+ }
+ }
+}
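
As a concrete example of the defaulting rules (a reduced sketch, not the PpConfig type itself): a config that sets NumAsset but no weights ends up with asset traffic only, while a config that activates nothing falls back to pure payment traffic.

    package main

    import "fmt"

    // weights is a hypothetical stand-in for the relevant PpConfig fields.
    type weights struct {
        numAsset, numApp, nftPerSec uint32
        payment, asset, app, nft    float64
    }

    // setDefaults applies the same rules as SetDefaultWeights above.
    func (w *weights) setDefaults() {
        const epsilon = 0.0000001
        if w.payment+w.asset+w.app+w.nft >= epsilon {
            return // explicit weights win
        }
        if w.numAsset > 0 {
            w.asset = 1
        }
        if w.numApp > 0 {
            w.app = 1
        }
        if w.nftPerSec > 0 {
            w.nft = 1
        }
        if w.numAsset == 0 && w.numApp == 0 && w.nftPerSec == 0 {
            w.payment = 1 // nothing activated: plain payments
        }
    }

    func main() {
        a := weights{numAsset: 4}
        a.setDefaults()
        fmt.Println(a.asset, a.payment) // 1 0: payments stay off

        p := weights{}
        p.setDefaults()
        fmt.Println(p.payment) // 1: pure payment traffic
    }
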
+
+var accountSampleMethods = []string{
+ "",
+ "random",
+ "sequential",
+}
+
+// Check returns an error if config is invalid.
+func (cfg *PpConfig) Check() error {
+ sampleOk := false
+ for _, v := range accountSampleMethods {
+ if v == cfg.GeneratedAccountSampleMethod {
+ sampleOk = true
+ break
+ }
+ }
+ if !sampleOk {
+ return fmt.Errorf("unknown GeneratedAccountSampleMethod: %s", cfg.GeneratedAccountSampleMethod)
+ }
+ if cfg.DeterministicKeys && (cfg.GeneratedAccountsOffset+cfg.NumPartAccounts > cfg.GeneratedAccountsCount) {
+ return fmt.Errorf("(GeneratedAccountsOffset %d) + (NumPartAccounts %d) > (GeneratedAccountsCount %d)", cfg.GeneratedAccountsOffset, cfg.NumPartAccounts, cfg.GeneratedAccountsCount)
+ }
+ return nil
+}
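
The deterministic-keys bound in Check guards simple window arithmetic: the worker uses pregenerated accounts [GeneratedAccountsOffset, GeneratedAccountsOffset+NumPartAccounts), and that window must fit inside GeneratedAccountsCount. A minimal numeric sketch of the same check:

    package main

    import "fmt"

    // windowFits reports whether [offset, offset+n) fits within count
    // pregenerated accounts, matching the bound Check enforces.
    func windowFits(offset, n, count uint32) bool {
        return offset+n <= count
    }

    func main() {
        fmt.Println(windowFits(80, 20, 100)) // true: exactly the last 20
        fmt.Println(windowFits(90, 20, 100)) // false: Check rejects this
    }
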
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 1592ba6c9..e0ee6812a 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -19,11 +19,13 @@ package pingpong
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"math"
"math/rand"
"os"
"strings"
+ "sync/atomic"
"time"
"github.com/algorand/go-deadlock"
@@ -36,7 +38,6 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util"
)
// CreatablesInfo has information about created assets, apps, and opt-ins
@@ -49,145 +50,146 @@ type CreatablesInfo struct {
// pingPongAccount represents the account state for each account in the pingpong application
// This includes the current balance and public/private keys tied to the account
type pingPongAccount struct {
+ balance uint64
+ balanceRound uint64
+
deadlock.Mutex
sk *crypto.SignatureSecrets
pk basics.Address
- balance uint64
- balanceRound uint64
+ // asset holdings
+ holdings map[uint64]uint64
}
func (ppa *pingPongAccount) getBalance() uint64 {
- ppa.Lock()
- defer ppa.Unlock()
- return ppa.balance
+ return atomic.LoadUint64(&ppa.balance)
}
func (ppa *pingPongAccount) setBalance(balance uint64) {
+ atomic.StoreUint64(&ppa.balance, balance)
+}
+
+func (ppa *pingPongAccount) addBalance(offset int64) {
+ if offset >= 0 {
+ atomic.AddUint64(&ppa.balance, uint64(offset))
+ return
+ }
+ for {
+ v := atomic.LoadUint64(&ppa.balance)
+ nv := v - uint64(-offset)
+ done := atomic.CompareAndSwapUint64(&ppa.balance, v, nv)
+ if done {
+ return
+ }
+ }
+}
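
addBalance applies a signed delta to an unsigned counter: positive offsets go straight through atomic.AddUint64, while negative ones loop on compare-and-swap. (atomic.AddUint64 with the two's-complement trick ^uint64(d-1) would also subtract in one call; the CAS form leaves room to add underflow checks later.) The same pattern in isolation:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // addSigned is the addBalance pattern: signed delta, unsigned counter.
    func addSigned(x *uint64, delta int64) {
        if delta >= 0 {
            atomic.AddUint64(x, uint64(delta))
            return
        }
        for {
            v := atomic.LoadUint64(x)
            if atomic.CompareAndSwapUint64(x, v, v-uint64(-delta)) {
                return
            }
        }
    }

    func main() {
        var balance uint64 = 100
        addSigned(&balance, -30)
        addSigned(&balance, +5)
        fmt.Println(balance) // 75
    }
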
+
+func (ppa *pingPongAccount) getAsset(aid uint64) (v uint64, ok bool) {
+ ppa.Lock()
+ defer ppa.Unlock()
+ v, ok = ppa.holdings[aid]
+ return
+}
+func (ppa *pingPongAccount) setAsset(aid, value uint64) {
+ ppa.Lock()
+ defer ppa.Unlock()
+ ppa.holdings[aid] = value
+}
+func (ppa *pingPongAccount) addAsset(aid uint64, dv int64) {
ppa.Lock()
defer ppa.Unlock()
- ppa.balance = balance
+ v := ppa.holdings[aid]
+ if dv >= 0 {
+ v += uint64(dv)
+ } else {
+ v -= uint64(-dv)
+ }
+ ppa.holdings[aid] = v
}
-func (ppa *pingPongAccount) addBalance(offset int64) {
+func (ppa *pingPongAccount) String() string {
ppa.Lock()
defer ppa.Unlock()
- ppa.balance = uint64(int64(ppa.balance) + offset)
+ var ow strings.Builder
+ fmt.Fprintf(&ow, "%s %d", ppa.pk.String(), ppa.balance)
+ if len(ppa.holdings) > 0 {
+ fmt.Fprintf(&ow, "[")
+ first := true
+ for assetID, av := range ppa.holdings {
+ if first {
+ first = false
+ } else {
+ fmt.Fprintf(&ow, ", ")
+ }
+ fmt.Fprintf(&ow, "a%d=%d", assetID, av)
+ }
+ fmt.Fprintf(&ow, "]")
+ }
+ return ow.String()
}
// WorkerState object holds a running pingpong worker
type WorkerState struct {
- cfg PpConfig
- accounts map[string]*pingPongAccount
- accountsMu deadlock.RWMutex
- cinfo CreatablesInfo
+ cfg PpConfig
+ accounts map[string]*pingPongAccount
+ cinfo CreatablesInfo
nftStartTime int64
localNftIndex uint64
nftHolders map[string]int
incTransactionSalt uint64
- muSuggestedParams deadlock.Mutex
- suggestedParams v1.TransactionParams
- pendingTxns v1.PendingTransactions
+ nextSendTime time.Time
+ scheduleActionTime time.Duration
+ scheduleCalls uint64
+ scheduleSteps uint64
+
+ refreshAddrs []string
+ refreshPos int
+
+ client *libgoal.Client
}
// PrepareAccounts sets up the accounts and asset accounts required for a Ping Pong run
-func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
- pps.accounts, pps.cfg, err = pps.ensureAccounts(ac, pps.cfg)
+func (pps *WorkerState) PrepareAccounts(ac *libgoal.Client) (err error) {
+ pps.client = ac
+ pps.nextSendTime = time.Now()
+ durPerTxn := time.Second / time.Duration(pps.cfg.TxnPerSec)
+ fmt.Printf("duration per txn %s\n", durPerTxn)
+
+ err = pps.ensureAccounts(ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ensure accounts failed %v\n", err)
return
}
- cfg := pps.cfg
- if cfg.NumAsset > 0 {
- // zero out max amount for asset transactions
- cfg.MaxAmt = 0
+ // create new ephemeral random accounts
+ pps.generateAccounts()
- var assetAccounts map[string]*pingPongAccount
- assetAccounts, err = pps.prepareNewAccounts(ac)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
- return
- }
+ err = pps.fundAccounts(ac)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
+ return
+ }
- pps.cinfo.AssetParams, pps.cinfo.OptIns, err = pps.prepareAssets(assetAccounts, ac)
+ if pps.cfg.NumAsset > 0 {
+ err = pps.prepareAssets(ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare assets failed %v\n", err)
return
}
-
- if !cfg.Quiet {
- for addr := range assetAccounts {
- if addr != pps.cfg.SrcAccount {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr].getBalance())
- }
- }
- }
- } else if cfg.NumApp > 0 {
- var appAccounts map[string]*pingPongAccount
- appAccounts, err = pps.prepareNewAccounts(ac)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
- return
- }
- pps.cinfo.AppParams, pps.cinfo.OptIns, err = pps.prepareApps(appAccounts, ac, cfg)
- if err != nil {
- return
- }
- if !cfg.Quiet {
- for addr := range appAccounts {
- if addr != pps.cfg.SrcAccount {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr].getBalance())
- }
- }
- }
- } else {
- // If we have more accounts than requested, pick the top N (not including src)
- if len(pps.accounts) > int(cfg.NumPartAccounts+1) {
- fmt.Printf("Finding the richest %d accounts to use for transacting\n", cfg.NumPartAccounts)
- pps.accounts = takeTopAccounts(pps.accounts, cfg.NumPartAccounts, cfg.SrcAccount)
- } else {
- // Not enough accounts yet (or just enough). Create more if needed
- fmt.Printf("Not enough accounts - creating %d more\n", int(cfg.NumPartAccounts+1)-len(pps.accounts))
- generateAccounts(pps.accounts, cfg.NumPartAccounts)
- }
-
- err = pps.fundAccounts(pps.accounts, ac, cfg)
+ }
+ if pps.cfg.NumApp > 0 {
+ err = pps.prepareApps(ac)
if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
return
}
- go pps.roundMonitor(ac)
}
-
- pps.cfg = cfg
- return
-}
-
-func (pps *WorkerState) prepareNewAccounts(client libgoal.Client) (newAccounts map[string]*pingPongAccount, err error) {
- // create new accounts for testing
- newAccounts = make(map[string]*pingPongAccount)
- generateAccounts(newAccounts, pps.cfg.NumPartAccounts)
- // copy the source account, as needed.
- if srcAcct, has := pps.accounts[pps.cfg.SrcAccount]; has {
- newAccounts[pps.cfg.SrcAccount] = srcAcct
- }
- pps.accounts = newAccounts
-
- err = pps.fundAccounts(newAccounts, client, pps.cfg)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
- return
- }
-
- go pps.roundMonitor(client)
return
}
// determine the min balance per participant account
-func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequiredBalance uint64, runningRequiredBalance uint64, err error) {
+func computeAccountMinBalance(client *libgoal.Client, cfg PpConfig) (fundingRequiredBalance uint64, runningRequiredBalance uint64, err error) {
proto, err := getProto(client)
if err != nil {
return
@@ -207,17 +209,6 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi
fee *= uint64(cfg.GroupSize)
}
- if cfg.NumApp > 0 {
- amount := uint64(0)
-
- runningRequiredBalance = (amount + fee) * 10 * 2
- setupCost := uint64(proto.MaxTxGroupSize) * (uint64(proto.AppFlatParamsMinBalance*2) + fee)
- // todo: add the cfg.NumAppOptIn to the setup cost.
- fundingRequiredBalance = proto.MinBalance + cfg.MinAccountFunds + (amount+fee)*10*2*cfg.TxnPerSec*uint64(math.Ceil(cfg.RefreshTime.Seconds())) + setupCost
- fmt.Printf("required min balance for app accounts: %d\n", fundingRequiredBalance)
- return
- }
-
fundingRequiredBalance = minActiveAccountBalance
runningRequiredBalance = minActiveAccountBalance
@@ -263,21 +254,90 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi
return
}
-// Wait for `*nextSendTime` and update it afterwards.
-func schedule(tps uint64, nextSendTime *time.Time) {
- dur := time.Until(*nextSendTime)
- if dur > 0 {
- time.Sleep(dur)
+func (pps *WorkerState) scheduleAction() bool {
+ if pps.refreshPos >= len(pps.refreshAddrs) {
+ if pps.refreshAddrs == nil {
+ pps.refreshAddrs = make([]string, 0, len(pps.accounts))
+ } else {
+ pps.refreshAddrs = pps.refreshAddrs[:0]
+ }
+ for addr := range pps.accounts {
+ pps.refreshAddrs = append(pps.refreshAddrs, addr)
+ }
+ pps.refreshPos = 0
}
+ addr := pps.refreshAddrs[pps.refreshPos]
+ ai, err := pps.client.AccountInformation(addr)
+ if err == nil {
+ ppa := pps.accounts[addr]
- *nextSendTime = nextSendTime.Add(time.Second / time.Duration(tps))
+ pps.integrateAccountInfo(addr, ppa, ai)
+ } else {
+ if !pps.cfg.Quiet {
+ fmt.Printf("background refresh err: %v\n", err)
+ }
+ return false
+ }
+ pps.refreshPos++
+ return true
+}
+
+const durationEpsilon = time.Microsecond * 10
+const scheduleActionTimeAlpha = 6
+
+// schedule waits for n txn time slots, spending idle wait time on background account refreshes
+func (pps *WorkerState) schedule(n int) {
+ pps.scheduleCalls++
+ now := time.Now()
+ ok := true
+ timePerStep := time.Second / time.Duration(pps.cfg.TxnPerSec)
+ nextSendTime := pps.nextSendTime
+ if n > 1 {
+ nextSendTime = nextSendTime.Add(timePerStep * time.Duration(n-1))
+ }
+ for {
+ if now.After(nextSendTime) {
+ break
+ }
+ dur := nextSendTime.Sub(now)
+ if dur < durationEpsilon {
+ break
+ }
+ if dur < pps.scheduleActionTime || !ok {
+ time.Sleep(dur)
+ now = time.Now()
+ } else {
+ ok = pps.scheduleAction()
+ nn := time.Now()
+ dt := nn.Sub(now)
+ // alpha blend to keep a running approximation of scheduleAction cost
+ pps.scheduleActionTime = ((pps.scheduleActionTime * scheduleActionTimeAlpha) + dt) / (scheduleActionTimeAlpha + 1)
+ now = nn
+ }
+ }
+
+ steps := 0
+ for now.After(nextSendTime) {
+ if steps > 0 {
+ dt := now.Sub(nextSendTime)
+ if dt < timePerStep/2 {
+ // good enough
+ break
+ }
+ }
+ pps.scheduleSteps++
+ nextSendTime = nextSendTime.Add(timePerStep)
+ steps++
+ }
+ pps.nextSendTime = nextSendTime
+ //fmt.Printf("schedule now=%s next=%s\n", now, pps.nextSendTime)
}
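
schedule fills otherwise-idle wait time with background account refreshes, but only when the running estimate of a refresh's cost (scheduleActionTime) fits in the gap before the next send slot. That estimate is an exponential moving average: with alpha = 6, each new sample moves it by one seventh of the difference. The blend in isolation:

    package main

    import (
        "fmt"
        "time"
    )

    const alpha = 6 // same constant as scheduleActionTimeAlpha

    // blend folds a new duration sample into a running estimate,
    // weighting history alpha:1 against the sample.
    func blend(estimate, sample time.Duration) time.Duration {
        return (estimate*alpha + sample) / (alpha + 1)
    }

    func main() {
        est := 10 * time.Millisecond
        for i := 0; i < 3; i++ {
            est = blend(est, 20*time.Millisecond)
            fmt.Println(est) // creeps toward 20ms, 1/7 of the gap per step
        }
    }
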
-func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
+func (pps *WorkerState) fundAccounts(client *libgoal.Client) error {
var srcFunds, minFund uint64
var err error
var tx transactions.Transaction
- srcFunds, err = client.GetBalance(cfg.SrcAccount)
+ srcFunds, err = client.GetBalance(pps.cfg.SrcAccount)
if err != nil {
return err
@@ -288,19 +348,19 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
// A fee of 0 will cause the function to use the fee suggested by the network
fee := uint64(0)
- minFund, _, err = computeAccountMinBalance(client, cfg)
+ minFund, _, err = computeAccountMinBalance(client, pps.cfg)
if err != nil {
return err
}
fmt.Printf("adjusting account balance to %d\n", minFund)
- srcAcct := accounts[cfg.SrcAccount]
+ srcAcct := pps.accounts[pps.cfg.SrcAccount]
- nextSendTime := time.Now()
- for {
- accountsAdjusted := 0
+ accountsAdjusted := 1
+ for accountsAdjusted > 0 {
+ accountsAdjusted = 0
adjStart := time.Now()
- for addr, acct := range accounts {
+ for addr, acct := range pps.accounts {
if addr == pps.cfg.SrcAccount {
continue
}
@@ -308,19 +368,19 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
if acct.getBalance() >= minFund {
continue
}
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("adjusting balance of account %v\n", addr)
}
toSend := minFund - acct.getBalance()
if srcFunds <= toSend {
- return fmt.Errorf("source account %s has insufficient funds %d - needs %d", cfg.SrcAccount, srcFunds, toSend)
+ return fmt.Errorf("source account %s has insufficient funds %d - needs %d", pps.cfg.SrcAccount, srcFunds, toSend)
}
srcFunds -= toSend
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("adjusting balance of account %v by %d\n ", addr, toSend)
}
- schedule(cfg.TxnPerSec, &nextSendTime)
+ pps.schedule(1)
tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend, srcAcct)
if err != nil {
if strings.Contains(err.Error(), "broadcast queue full") {
@@ -332,32 +392,29 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
}
srcFunds -= tx.Fee.Raw
accountsAdjusted++
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("account balance for key %s will be %d\n", addr, minFund)
}
acct.setBalance(minFund)
totalSent++
}
- accounts[cfg.SrcAccount].setBalance(srcFunds)
+ pps.accounts[pps.cfg.SrcAccount].setBalance(srcFunds)
waitStart := time.Now()
// wait until all the above transactions are sent, or until we have no more transactions
// in our pending transaction pool coming from the source account.
- err = waitPendingTransactions([]string{cfg.SrcAccount}, client)
+ err = waitPendingTransactions([]string{pps.cfg.SrcAccount}, client)
if err != nil {
return err
}
waitStop := time.Now()
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("%d sent (%s); waited %s\n", accountsAdjusted, waitStart.Sub(adjStart).String(), waitStop.Sub(waitStart).String())
}
- if accountsAdjusted == 0 {
- break
- }
}
return err
}
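
The rewritten funding loop is a sweep-until-quiescent fixed point: keep passing over every account, topping up those below minFund, until a full pass makes zero adjustments. The loop shape in miniature (the top-up assignment is a placeholder for the real payment txn):

    package main

    import "fmt"

    // sweep tops up every balance below minFund until a pass changes nothing.
    func sweep(balances map[string]uint64, minFund uint64) (passes int) {
        adjusted := 1
        for adjusted > 0 {
            adjusted = 0
            for addr, bal := range balances {
                if bal < minFund {
                    balances[addr] = minFund // placeholder top-up
                    adjusted++
                }
            }
            passes++
        }
        return passes
    }

    func main() {
        b := map[string]uint64{"a": 5, "b": 50}
        fmt.Println(sweep(b, 10), b) // 2 map[a:10 b:50]
    }
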
-func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to string, fee, amount uint64, srcAcct *pingPongAccount) (transactions.Transaction, error) {
+func (pps *WorkerState) sendPaymentFromSourceAccount(client *libgoal.Client, to string, fee, amount uint64, srcAcct *pingPongAccount) (transactions.Transaction, error) {
// generate a unique note to avoid duplicate transaction failures
note := pps.makeNextUniqueNoteField()
@@ -388,7 +445,7 @@ func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to s
// accounts map have been cleared out of the transaction pool. A prerequisite for this is that
// there is no other source that might be generating transactions coming from these account
// addresses.
-func waitPendingTransactions(accounts []string, client libgoal.Client) error {
+func waitPendingTransactions(accounts []string, client *libgoal.Client) error {
for _, from := range accounts {
repeat:
pendingTxns, err := client.GetPendingTransactionsByAddress(from, 0)
@@ -411,13 +468,11 @@ func waitPendingTransactions(accounts []string, client libgoal.Client) error {
return nil
}
-func (pps *WorkerState) refreshAccounts(client libgoal.Client, cfg PpConfig) error {
- pps.accountsMu.Lock()
+func (pps *WorkerState) refreshAccounts(client *libgoal.Client) error {
addrs := make([]string, 0, len(pps.accounts))
for addr := range pps.accounts {
addrs = append(addrs, addr)
}
- pps.accountsMu.Unlock()
// wait until all the pending transactions have been sent; otherwise, getting the balance
// is pretty much meaningless.
fmt.Printf("waiting for all transactions to be accepted before refreshing accounts.\n")
@@ -436,13 +491,11 @@ func (pps *WorkerState) refreshAccounts(client libgoal.Client, cfg PpConfig) err
balanceUpdates[addr] = amount
}
- pps.accountsMu.Lock()
- defer pps.accountsMu.Unlock()
for addr, amount := range balanceUpdates {
pps.accounts[addr].setBalance(amount)
}
- return pps.fundAccounts(pps.accounts, client, cfg)
+ return pps.fundAccounts(client)
}
// return a shuffled list of accounts with some minimum balance
@@ -463,7 +516,7 @@ func listSufficientAccounts(accounts map[string]*pingPongAccount, minimumAmount
var logPeriod = 5 * time.Second
// RunPingPong starts ping pong process
-func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
+func (pps *WorkerState) RunPingPong(ctx context.Context, ac *libgoal.Client) {
// Infinite loop given:
// - accounts -> map of accounts to include in transfers (including src account, which we don't want to use)
// - cfg -> configuration for how to proceed
@@ -480,23 +533,21 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
// error = fundAccounts()
// }
- cfg := pps.cfg
+ pps.nextSendTime = time.Now()
+ ac.SetSuggestedParamsCacheAge(200 * time.Millisecond)
+ pps.client = ac
+
var runTime time.Duration
- if cfg.RunTime > 0 {
- runTime = cfg.RunTime
+ if pps.cfg.RunTime > 0 {
+ runTime = pps.cfg.RunTime
} else {
runTime = 10000 * time.Hour // Effectively 'forever'
}
var endTime time.Time
- if cfg.MaxRuntime > 0 {
- endTime = time.Now().Add(cfg.MaxRuntime)
- }
- refreshTime := time.Now().Add(cfg.RefreshTime)
-
- var nftThrottler *throttler
- if pps.cfg.NftAsaPerSecond > 0 {
- nftThrottler = newThrottler(20, float64(pps.cfg.NftAsaPerSecond))
+ if pps.cfg.MaxRuntime > 0 {
+ endTime = time.Now().Add(pps.cfg.MaxRuntime)
}
+ refreshTime := time.Now().Add(pps.cfg.RefreshTime)
lastLog := time.Now()
nextLog := lastLog.Add(logPeriod)
@@ -518,7 +569,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
if now.After(nextLog) {
dt := now.Sub(lastLog)
- fmt.Printf("%d sent, %0.2f/s (%d total)\n", totalSent-lastTotalSent, float64(totalSent-lastTotalSent)/dt.Seconds(), totalSent)
+ fmt.Printf("%d sent, %0.2f/s (%d total) (%d sc %d sts)\n", totalSent-lastTotalSent, float64(totalSent-lastTotalSent)/dt.Seconds(), totalSent, pps.scheduleCalls, pps.scheduleSteps)
lastTotalSent = totalSent
for now.After(nextLog) {
nextLog = nextLog.Add(logPeriod)
@@ -526,32 +577,18 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
lastLog = now
}
- if cfg.MaxRuntime > 0 && time.Now().After(endTime) {
- fmt.Printf("Terminating after max run time of %.f seconds\n", cfg.MaxRuntime.Seconds())
+ if pps.cfg.MaxRuntime > 0 && time.Now().After(endTime) {
+ fmt.Printf("Terminating after max run time of %.f seconds\n", pps.cfg.MaxRuntime.Seconds())
return
}
- if pps.cfg.NftAsaPerSecond > 0 {
- sent, err := pps.makeNftTraffic(ac)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error sending nft transactions: %v\n", err)
- }
- nftThrottler.maybeSleep(int(sent))
- totalSent += sent
- continue
- }
-
- minimumAmount := cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*2
- pps.accountsMu.RLock()
- fromList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- pps.accountsMu.RUnlock()
+ minimumAmount := pps.cfg.MinAccountFunds + (pps.cfg.MaxAmt+pps.cfg.MaxFee)*2
+ fromList := listSufficientAccounts(pps.accounts, minimumAmount, pps.cfg.SrcAccount)
// in group tests txns are sent back and forth, so both parties need funds
var toList []string
- if cfg.GroupSize == 1 {
+ if pps.cfg.GroupSize == 1 {
minimumAmount = 0
- pps.accountsMu.RLock()
- toList = listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- pps.accountsMu.RUnlock()
+ toList = listSufficientAccounts(pps.accounts, minimumAmount, pps.cfg.SrcAccount)
} else {
// same selection with another shuffle
toList = make([]string, len(fromList))
@@ -563,16 +600,18 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
totalSent += sent
totalSucceeded += succeeded
if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "error sending transactions: %v\n", err)
+ _, _ = fmt.Fprintf(os.Stderr, "error sending transactions, sleeping .5 seconds: %v\n", err)
+ pps.nextSendTime = time.Now().Add(500 * time.Millisecond)
+ pps.schedule(1)
}
- if cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
- err = pps.refreshAccounts(ac, cfg)
+ if pps.cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
+ err = pps.refreshAccounts(ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error refreshing: %v\n", err)
}
- refreshTime = refreshTime.Add(cfg.RefreshTime)
+ refreshTime = refreshTime.Add(pps.cfg.RefreshTime)
}
}
@@ -586,157 +625,58 @@ func NewPingpong(cfg PpConfig) *WorkerState {
return &WorkerState{cfg: cfg, nftHolders: make(map[string]int)}
}
-func randomizeCreatableID(cfg PpConfig, cinfo CreatablesInfo) (aidx uint64) {
- if cfg.NumAsset > 0 {
- rindex := rand.Intn(len(cinfo.AssetParams))
- i := 0
- for k := range cinfo.AssetParams {
- if i == rindex {
- aidx = k
- break
- }
- i++
- }
- } else if cfg.NumApp > 0 {
- rindex := rand.Intn(len(cinfo.AppParams))
- i := 0
- for k := range cinfo.AppParams {
- if i == rindex {
- aidx = k
- break
- }
- i++
+func (pps *WorkerState) randAssetID() (aidx uint64) {
+ if len(pps.cinfo.AssetParams) == 0 {
+ return 0
+ }
+ rindex := rand.Intn(len(pps.cinfo.AssetParams))
+ i := 0
+ for k := range pps.cinfo.AssetParams {
+ if i == rindex {
+ return k
}
+ i++
}
return
}
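
randAssetID draws a uniformly random key from a map by picking a random index and walking the iteration until it reaches it; uniformity comes from rand.Intn, not from Go's (unspecified) map iteration order. The same helper in isolation:

    package main

    import (
        "fmt"
        "math/rand"
    )

    // randKey returns a uniformly random key of m, or 0 if m is empty.
    func randKey(m map[uint64]struct{}) uint64 {
        if len(m) == 0 {
            return 0
        }
        rindex := rand.Intn(len(m))
        i := 0
        for k := range m {
            if i == rindex {
                return k
            }
            i++
        }
        return 0 // unreachable
    }

    func main() {
        m := map[uint64]struct{}{10: {}, 20: {}, 30: {}}
        fmt.Println(randKey(m)) // 10, 20, or 30, each with probability 1/3
    }
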
-
-func (pps *WorkerState) fee() uint64 {
- cfg := pps.cfg
- fee := cfg.MaxFee
- if cfg.RandomizeFee {
- fee = rand.Uint64()%(cfg.MaxFee-cfg.MinFee) + cfg.MinFee
+func (pps *WorkerState) randAppID() (aidx uint64) {
+ if len(pps.cinfo.AppParams) == 0 {
+ return 0
}
- return fee
-}
-
-func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64, err error) {
- fee := pps.fee()
- var srcCost uint64
- if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) {
- var addr string
-
- var seed [32]byte
- crypto.RandBytes(seed[:])
- privateKey := crypto.GenerateSignatureSecrets(seed)
- publicKey := basics.Address(privateKey.SignatureVerifier)
-
- pps.accountsMu.Lock()
- pps.accounts[publicKey.String()] = &pingPongAccount{
- sk: privateKey,
- pk: publicKey,
- }
- pps.accountsMu.Unlock()
- addr = publicKey.String()
-
- fmt.Printf("new NFT holder %s\n", addr)
- var proto config.ConsensusParams
- proto, err = getProto(client)
- if err != nil {
- return
- }
- // enough for the per-asa minbalance and more than enough for the txns to create them
- toSend := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
- pps.nftHolders[addr] = 0
- var tx transactions.Transaction
- srcAcct := pps.acct(pps.cfg.SrcAccount)
- tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend, srcAcct)
- if err != nil {
- return
+ rindex := rand.Intn(len(pps.cinfo.AppParams))
+ i := 0
+ for k := range pps.cinfo.AppParams {
+ if i == rindex {
+ return k
}
- srcCost += tx.Fee.Raw + toSend
- sentCount++
- // we ran one txn above already to fund the new addr,
- // we'll run a second txn below
- }
- pps.accountsMu.Lock()
- pps.accounts[pps.cfg.SrcAccount].addBalance(-int64(srcCost))
- pps.accountsMu.Unlock()
- // pick a random sender from nft holder sub accounts
- pick := rand.Intn(len(pps.nftHolders))
- pos := 0
- var sender string
- var senderNftCount int
- for addr, nftCount := range pps.nftHolders {
- sender = addr
- senderNftCount = nftCount
- if pos == pick {
- break
- }
- pos++
-
- }
- var meta [32]byte
- rand.Read(meta[:])
- assetName := pps.nftSpamAssetName()
- const totalSupply = 1
- txn, err := client.MakeUnsignedAssetCreateTx(totalSupply, false, sender, sender, sender, sender, "ping", assetName, "", meta[:], 0)
- if err != nil {
- fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
- return
- }
- txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, pps.cfg.MaxFee, txn)
- if err != nil {
- fmt.Printf("Cannot fill asset creation txn\n")
- return
- }
- if senderNftCount+1 >= int(pps.cfg.NftAsaPerAccount) {
- delete(pps.nftHolders, sender)
- } else {
- pps.nftHolders[sender] = senderNftCount + 1
- }
- signer := pps.acct(sender)
- stxn, err := signTxn(signer, txn, pps.cfg)
- if err != nil {
- return
+ i++
}
+ return
+}
- _, err = client.BroadcastTransaction(stxn)
- if err != nil {
- return
+func (pps *WorkerState) fee() uint64 {
+ fee := pps.cfg.MaxFee
+ if pps.cfg.RandomizeFee {
+ fee = rand.Uint64()%(pps.cfg.MaxFee-pps.cfg.MinFee) + pps.cfg.MinFee
}
- sentCount++
- return
+ return fee
}
func (pps *WorkerState) acct(from string) *pingPongAccount {
- pps.accountsMu.RLock()
- defer pps.accountsMu.RUnlock()
return pps.accounts[from]
}
func (pps *WorkerState) sendFromTo(
fromList, toList []string,
- client libgoal.Client, nextSendTime *time.Time,
+ client *libgoal.Client, nextSendTime *time.Time,
) (sentCount, successCount uint64, err error) {
- cinfo := pps.cinfo
- cfg := pps.cfg
-
- amt := cfg.MaxAmt
var minAccountRunningBalance uint64
- _, minAccountRunningBalance, err = computeAccountMinBalance(client, cfg)
+ _, minAccountRunningBalance, err = computeAccountMinBalance(client, pps.cfg)
if err != nil {
return 0, 0, err
}
belowMinBalanceAccounts := make(map[string] /*basics.Address*/ bool)
- assetsByCreator := make(map[string][]*v1.AssetParams)
- for _, p := range cinfo.AssetParams {
- c := p.Creator
- ap := &v1.AssetParams{}
- *ap = p
- assetsByCreator[c] = append(assetsByCreator[c], ap)
- }
for i, from := range fromList {
// keep going until the balances of at least 20% of the accounts are too low.
@@ -749,14 +689,10 @@ func (pps *WorkerState) sendFromTo(
continue
}
- if cfg.RandomizeAmt {
- amt = ((rand.Uint64() % cfg.MaxAmt) + 1) % cfg.MaxAmt
- }
-
fee := pps.fee()
to := toList[i]
- if cfg.RandomizeDst {
+ if pps.cfg.RandomizeDst {
var addr basics.Address
crypto.RandBytes(addr[:])
to = addr.String()
@@ -772,22 +708,15 @@ func (pps *WorkerState) sendFromTo(
// Broadcast transaction
var sendErr error
- fromBalanceChange := int64(0)
- toBalanceChange := int64(0)
- if cfg.NumAsset > 0 {
- amt = 1
- } else if cfg.NumApp > 0 {
- amt = 0
- }
- fromAcct := pps.acct(from)
- if cfg.GroupSize == 1 {
- // generate random assetID or appId if we send asset/app txns
- aidx := randomizeCreatableID(cfg, cinfo)
+ var fromAcct *pingPongAccount
+ var update txnUpdate
+ var updates []txnUpdate
+ if pps.cfg.GroupSize == 1 {
var txn transactions.Transaction
var consErr error
// Construct single txn
- txn, from, consErr = pps.constructTxn(from, to, fee, amt, aidx, client)
+ txn, from, update, consErr = pps.constructTxn(from, to, fee, client)
if consErr != nil {
err = consErr
_, _ = fmt.Fprintf(os.Stderr, "constructTxn failed: %v\n", err)
@@ -795,26 +724,29 @@ func (pps *WorkerState) sendFromTo(
}
// would we have enough money after taking into account the current updated fees ?
- if fromAcct.getBalance() <= (txn.Fee.Raw + amt + minAccountRunningBalance) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d: %s -> %s; Current cost too high(%d <= %d + %d + %d).\n", amt, from, to, fromAcct.getBalance(), txn.Fee.Raw, amt, minAccountRunningBalance)
+ fromAcct = pps.acct(from)
+ if fromAcct == nil {
+ err = fmt.Errorf("tx %v from %s -> no acct", txn, from)
+ fmt.Fprintf(os.Stderr, "%s\n", err.Error())
+ return
+ }
+
+ if fromAcct.getBalance() <= (txn.Fee.Raw + pps.cfg.MaxAmt + minAccountRunningBalance) {
+ _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d: %s -> %s; Current cost too high(%d <= %d + %d + %d).\n", pps.cfg.MaxAmt, from, to, fromAcct.getBalance(), txn.Fee.Raw, pps.cfg.MaxAmt, minAccountRunningBalance)
belowMinBalanceAccounts[from] = true
continue
}
- fromBalanceChange = -int64(txn.Fee.Raw + amt)
- toBalanceChange = int64(amt)
-
// Sign txn
- signer := pps.acct(from)
- stxn, signErr := signTxn(signer, txn, cfg)
+ stxn, signErr := signTxn(fromAcct, txn, pps.cfg)
if signErr != nil {
err = signErr
_, _ = fmt.Fprintf(os.Stderr, "signTxn failed: %v\n", err)
return
}
- schedule(cfg.TxnPerSec, nextSendTime)
sentCount++
+ pps.schedule(1)
_, sendErr = client.BroadcastTransaction(stxn)
} else {
// Generate txn group
@@ -826,31 +758,22 @@ func (pps *WorkerState) sendFromTo(
var txGroup []transactions.Transaction
var txSigners []string
- for j := 0; j < int(cfg.GroupSize); j++ {
+ for j := 0; j < int(pps.cfg.GroupSize); j++ {
var txn transactions.Transaction
var signer string
if j%2 == 0 {
- txn, signer, err = pps.constructTxn(from, to, fee, amt, 0, client)
- fromBalanceChange -= int64(txn.Fee.Raw + amt)
- toBalanceChange += int64(amt)
- } else if cfg.GroupSize == 2 && cfg.Rekey {
- txn, _, err = pps.constructTxn(from, to, fee, amt, 0, client)
- fromBalanceChange -= int64(txn.Fee.Raw + amt)
- toBalanceChange += int64(amt)
+ txn, signer, update, err = pps.constructTxn(from, to, fee, client)
+ } else if pps.cfg.GroupSize == 2 && pps.cfg.Rekey {
+ txn, _, update, err = pps.constructTxn(from, to, fee, client)
signer = to
} else {
- txn, signer, err = pps.constructTxn(to, from, fee, amt, 0, client)
- toBalanceChange -= int64(txn.Fee.Raw + amt)
- fromBalanceChange += int64(amt)
+ txn, signer, update, err = pps.constructTxn(to, from, fee, client)
}
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "group tx failed: %v\n", err)
return
}
- if cfg.RandomizeAmt && j%2 == 1 {
- amt = rand.Uint64()%cfg.MaxAmt + 1
- }
- if cfg.Rekey {
+ if pps.cfg.Rekey {
if from == signer {
// rekey to the receiver the first txn of the rekeying pair
txn.RekeyTo, err = basics.UnmarshalChecksumAddress(to)
@@ -865,17 +788,7 @@ func (pps *WorkerState) sendFromTo(
}
txGroup = append(txGroup, txn)
txSigners = append(txSigners, signer)
- }
-
- // would we have enough money after taking into account the current updated fees ?
- if int64(fromAcct.getBalance())+fromBalanceChange <= int64(cfg.MinAccountFunds) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d : %s -> %s; Current cost too high.\n", amt, from, to)
- continue
- }
- toAcct := pps.acct(to)
- if int64(toAcct.getBalance())+toBalanceChange <= int64(cfg.MinAccountFunds) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending back %d : %s -> %s; Current cost too high.\n", amt, to, from)
- continue
+ updates = append(updates, update)
}
// Generate group ID
@@ -885,7 +798,7 @@ func (pps *WorkerState) sendFromTo(
return
}
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
_, _ = fmt.Fprintf(os.Stdout, "Sending TxnGroup: ID %v, size %v \n", gid, len(txGroup))
}
@@ -895,29 +808,34 @@ func (pps *WorkerState) sendFromTo(
for j, txn := range txGroup {
txn.Group = gid
signer := pps.acct(txSigners[j])
- stxGroup[j], signErr = signTxn(signer, txn, cfg)
+ stxGroup[j], signErr = signTxn(signer, txn, pps.cfg)
if signErr != nil {
err = signErr
return
}
}
- schedule(cfg.TxnPerSec, nextSendTime)
- sentCount++
+ sentCount += uint64(len(txGroup))
+ pps.schedule(len(txGroup))
sendErr = client.BroadcastTransactionGroup(stxGroup)
}
if sendErr != nil {
- _, _ = fmt.Fprintf(os.Stderr, "error sending Transaction, sleeping .5 seconds: %v\n", sendErr)
err = sendErr
- time.Sleep(500 * time.Millisecond)
return
}
- successCount++
- fromAcct.addBalance(fromBalanceChange)
- // avoid updating the "to" account.
+ // assume that if it was accepted by an algod, it got processed
+ // (this is a bad assumption; we should be checking pending status or reading blocks to see if our txids were committed)
+ if len(updates) > 0 {
+ for _, ud := range updates {
+ ud.apply(pps)
+ }
+ } else if update != nil {
+ update.apply(pps)
+ }
+ successCount++
}
return
}
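
The flow above defers all local bookkeeping into txnUpdate values and applies them only after the broadcast succeeds. A minimal, self-contained sketch of that pattern (hypothetical simplified types, not the actual pingpong ones):

	package main

	import "fmt"

	// update mirrors the txnUpdate idea: a deferred mutation of local state.
	type update interface{ apply(balances map[string]uint64) }

	type payment struct {
		from, to string
		amt, fee uint64
	}

	func (p payment) apply(balances map[string]uint64) {
		balances[p.from] -= p.amt + p.fee
		balances[p.to] += p.amt
	}

	// send applies pending updates only after a successful broadcast, so a
	// failed send leaves the local model at the last known-good state.
	func send(balances map[string]uint64, broadcast func() error, pending []update) error {
		if err := broadcast(); err != nil {
			return err
		}
		for _, u := range pending {
			u.apply(balances) // optimistic: acceptance is assumed to imply commitment
		}
		return nil
	}

	func main() {
		balances := map[string]uint64{"A": 100, "B": 0}
		_ = send(balances, func() error { return nil }, []update{payment{"A", "B", 10, 1}})
		fmt.Println(balances) // map[A:89 B:10]
	}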
@@ -936,74 +854,14 @@ func (pps *WorkerState) makeNextUniqueNoteField() []byte {
return noteField[:usedBytes]
}
-func (pps *WorkerState) roundMonitor(client libgoal.Client) {
- var minFund uint64
- var err error
- for {
- minFund, _, err = computeAccountMinBalance(client, pps.cfg)
- if err == nil {
- break
- }
- }
- var newBalance uint64
- for {
- paramsResp, err := client.SuggestedParams()
- if err != nil {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- pendingTxns, err := client.GetPendingTransactions(0)
- if err != nil {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- pps.muSuggestedParams.Lock()
- pps.suggestedParams = paramsResp
- pps.pendingTxns = pendingTxns
- pps.muSuggestedParams.Unlock()
-
- // take a quick snapshot of accounts to decrease mutex shadow
- pps.accountsMu.Lock()
- accountsSnapshot := make([]*pingPongAccount, 0, len(pps.accounts))
- for _, acct := range pps.accounts {
- accountsSnapshot = append(accountsSnapshot, acct)
- }
- pps.accountsMu.Unlock()
-
- for _, acct := range accountsSnapshot {
- acct.Lock()
- needRefresh := acct.balance < minFund && acct.balanceRound < paramsResp.LastRound
- acct.Unlock()
- if needRefresh {
- newBalance, err = client.GetBalance(acct.pk.String())
- if err == nil {
- acct.Lock()
- acct.balanceRound, acct.balance = paramsResp.LastRound, newBalance
- acct.Unlock()
- }
- }
- }
-
- // wait for the next round.
- waitForNextRoundOrSleep(client, 200*time.Millisecond)
- }
-}
-
-func (pps *WorkerState) getSuggestedParams() v1.TransactionParams {
- pps.muSuggestedParams.Lock()
- defer pps.muSuggestedParams.Unlock()
- return pps.suggestedParams
-}
+var errNotOptedIn = errors.New("not opted in")
-func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, client libgoal.Client) (txn transactions.Transaction, sender string, err error) {
- cfg := pps.cfg
- cinfo := pps.cinfo
- sender = from
+func (pps *WorkerState) constructTxn(from, to string, fee uint64, client *libgoal.Client) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
var noteField []byte
const pingpongTag = "pingpong"
const tagLen = len(pingpongTag)
// if random note flag set, then append a random number of additional bytes
- if cfg.RandomNote {
+ if pps.cfg.RandomNote {
const maxNoteFieldLen = 1024
noteLength := tagLen + int(rand.Uint32())%(maxNoteFieldLen-tagLen)
noteField = make([]byte, noteLength)
@@ -1015,83 +873,38 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
// if random lease flag set, fill the lease field with random bytes
var lease [32]byte
- if cfg.RandomLease {
+ if pps.cfg.RandomLease {
crypto.RandBytes(lease[:])
}
- if cfg.NumApp > 0 { // Construct app transaction
- // select opted-in accounts for Txn.Accounts field
- var accounts []string
- assetOptIns := cinfo.OptIns[aidx]
- if len(assetOptIns) > 0 {
- indices := rand.Perm(len(assetOptIns))
- limit := 5
- if len(indices) < limit {
- limit = len(indices)
- }
- for i := 0; i < limit; i++ {
- idx := indices[i]
- accounts = append(accounts, assetOptIns[idx])
- }
- if cinfo.AssetParams[aidx].Creator == from {
- // if the application was created by the "from" account, then we don't need to worry about it being opted-in.
- } else {
- fromIsOptedIn := false
- for i := 0; i < len(assetOptIns); i++ {
- if assetOptIns[i] == from {
- fromIsOptedIn = true
- break
- }
- }
- if !fromIsOptedIn {
- sender = accounts[0]
- from = sender
- }
- }
- accounts = accounts[1:]
- }
- txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil)
- if err != nil {
- return
- }
- txn.Note = noteField[:]
- txn.Lease = lease
- txn, err = client.FillUnsignedTxTemplate(from, 0, 0, cfg.MaxFee, txn)
- if !cfg.Quiet {
- _, _ = fmt.Fprintf(os.Stdout, "Calling app %d : %s\n", aidx, from)
- }
- } else if cfg.NumAsset > 0 { // Construct asset transaction
- // select a pair of random opted-in accounts by aidx
- // use them as from/to addresses
- if from != to {
- if len(cinfo.OptIns[aidx]) > 0 {
- indices := rand.Perm(len(cinfo.OptIns[aidx]))
- from = cinfo.OptIns[aidx][indices[0]]
- to = cinfo.OptIns[aidx][indices[1]]
- sender = from
- } else {
- err = fmt.Errorf("asset %d has not been opted in by any account", aidx)
- _, _ = fmt.Fprintf(os.Stdout, "error constructing transaction - %v\n", err)
- return
- }
- }
- txn, err = client.MakeUnsignedAssetSendTx(aidx, amt, to, "", "")
- if err != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", err)
- return
+ // weighted random selection of traffic type
+ // TODO: the construct*Txn() funcs share the same signature; make this a data structure and loop over it?
+ totalWeight := pps.cfg.WeightPayment + pps.cfg.WeightAsset + pps.cfg.WeightApp + pps.cfg.WeightNFTCreation
+ target := rand.Float64() * totalWeight
+ if target < pps.cfg.WeightAsset && pps.cfg.NumAsset > 0 {
+ txn, sender, update, err = pps.constructAssetTxn(from, to, fee, client, noteField, lease)
+ if err != errNotOptedIn {
+ goto weightdone
}
- txn.Note = noteField[:]
- txn.Lease = lease
- txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, cfg.MaxFee, txn)
- if !cfg.Quiet {
- _, _ = fmt.Fprintf(os.Stdout, "Sending %d asset %d: %s -> %s\n", amt, aidx, sender, to)
+ }
+ target -= pps.cfg.WeightAsset
+ if target < pps.cfg.WeightApp && pps.cfg.NumApp > 0 {
+ txn, sender, update, err = pps.constructAppTxn(from, to, fee, client, noteField, lease)
+ if err != errNotOptedIn {
+ goto weightdone
}
- } else {
- txn, err = pps.constructPayment(from, to, fee, amt, noteField, "", lease)
- if !cfg.Quiet {
- _, _ = fmt.Fprintf(os.Stdout, "Sending %d : %s -> %s\n", amt, from, to)
+ }
+ target -= pps.cfg.WeightApp
+ if target < pps.cfg.WeightNFTCreation && pps.cfg.NftAsaPerSecond > 0 {
+ txn, sender, update, err = pps.constructNFTGenTxn(from, to, fee, client, noteField, lease)
+ if err != errNotOptedIn {
+ goto weightdone
}
}
+ // TODO: other traffic types here
+ // fall back to payment
+ txn, sender, update, err = pps.constructPaymentTxn(from, to, fee, client, noteField, lease)
+weightdone:
if err != nil {
_, _ = fmt.Fprintf(os.Stdout, "error constructing transaction %v\n", err)
@@ -1100,8 +913,8 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
// limit the transaction validity window to 5 rounds to keep it from lingering in the transaction pool for too long.
txn.LastValid = txn.FirstValid + 5
- // if cfg.MaxFee == 0, automatically adjust the fee amount to required min fee
- if cfg.MaxFee == 0 {
+ // if pps.cfg.MaxFee == 0, automatically adjust the fee amount to required min fee
+ if pps.cfg.MaxFee == 0 {
var suggestedFee uint64
suggestedFee, err = client.SuggestedFee()
if err != nil {
@@ -1115,85 +928,313 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
return
}
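
constructTxn's weight walk draws one uniform sample in [0, totalWeight) and subtracts each branch's weight until one matches, with payment as the fallback. A runnable sketch of the same selection technique (the weights here are illustrative, not the config defaults):

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		weights := []struct {
			name string
			w    float64
		}{
			{"asset", 0.2},
			{"app", 0.3},
			{"nftgen", 0.1},
		}
		total := 0.4 // payment's weight; everything unmatched falls through to it
		for _, e := range weights {
			total += e.w
		}

		counts := map[string]int{}
		for i := 0; i < 100000; i++ {
			target := rand.Float64() * total
			picked := "payment" // fallback, as in constructTxn
			for _, e := range weights {
				if target < e.w {
					picked = e.name
					break
				}
				target -= e.w
			}
			counts[picked]++
		}
		fmt.Println(counts) // counts come out roughly proportional to the weights
	}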
-// ConstructPayment builds a payment transaction to be signed
-// If the fee is 0, the function will use the suggested one form the network
-// Although firstValid and lastValid come pre-computed in a normal flow,
-// additional validation is done by computeValidityRounds:
-// if the lastValid is 0, firstValid + maxTxnLifetime will be used
-// if the firstValid is 0, lastRound + 1 will be used
-func (pps *WorkerState) constructPayment(from, to string, fee, amount uint64, note []byte, closeTo string, lease [32]byte) (transactions.Transaction, error) {
- fromAddr, err := basics.UnmarshalChecksumAddress(from)
- if err != nil {
- return transactions.Transaction{}, err
+type txnUpdate interface {
+ apply(pps *WorkerState)
+}
+
+func (pps *WorkerState) constructPaymentTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ amt := pps.cfg.MaxAmt
+ if pps.cfg.RandomizeAmt {
+ amt = uint64(rand.Int63n(int64(pps.cfg.MaxAmt-1))) + 1
+ }
+ txn, err = client.ConstructPayment(from, to, fee, amt, noteField, "", lease, 0, 0)
+ if !pps.cfg.Quiet {
+ _, _ = fmt.Fprintf(os.Stdout, "Sending %d : %s -> %s\n", amt, from, to)
+ }
+ update = &paymentUpdate{
+ from: from,
+ to: to,
+ amt: amt,
+ fee: fee,
}
+ return txn, from, update, err
+}
+
+type paymentUpdate struct {
+ from string
+ to string
+ amt uint64
+ fee uint64
+}
- var toAddr basics.Address
- if to != "" {
- toAddr, err = basics.UnmarshalChecksumAddress(to)
+func (au *paymentUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= (au.fee + au.amt)
+ pps.accounts[au.to].balance += au.amt
+}
+
+// return true with probability 1/i
+func pReplace(i int) bool {
+ if i <= 1 {
+ return true
+ }
+ return rand.Intn(i) == 0
+}
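
pReplace is the size-1 reservoir-sampling step: replacing the current choice with element i at probability 1/i leaves every element of a stream equally likely after a single pass. A sketch of how it yields a uniform pick (hypothetical helper; note the 1-based index pReplace expects):

	// pickUniform returns a uniformly random element of items in one pass.
	func pickUniform(items []string) string {
		var chosen string
		for i, it := range items {
			if pReplace(i + 1) { // element 1 is always taken; element i replaces with prob 1/i
				chosen = it
			}
		}
		return chosen
	}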
+
+func (pps *WorkerState) constructAssetTxn(from, toUnused string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ // select a pair of random opted-in accounts by aidx
+ // use them as from/to addresses
+ amt := uint64(1)
+ aidx := pps.randAssetID()
+ if aidx == 0 {
+ err = fmt.Errorf("no known assets")
+ return
+ }
+ if len(pps.cinfo.OptIns[aidx]) == 0 {
+ // Opt-in another
+ // TODO: continue opt-in up to some amount? gradually?
+ txn, err = pps.appOptIn(from, aidx, client)
if err != nil {
- return transactions.Transaction{}, err
+ return
+ }
+ update = &appOptInUpdate{
+ addr: from,
+ aidx: aidx,
}
+ return txn, from, update, nil
}
- // Get current round, protocol, genesis ID
- var params v1.TransactionParams
- for params.LastRound == 0 {
- params = pps.getSuggestedParams()
- }
-
- cp, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
- if !ok {
- return transactions.Transaction{}, fmt.Errorf("ConstructPayment: unknown consensus protocol %s", params.ConsensusVersion)
- }
- fv := params.LastRound + 1
- lv := fv + cp.MaxTxnLife - 1
-
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: fromAddr,
- Fee: basics.MicroAlgos{Raw: fee},
- FirstValid: basics.Round(fv),
- LastValid: basics.Round(lv),
- Lease: lease,
- Note: note,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: toAddr,
- Amount: basics.MicroAlgos{Raw: amount},
- },
- }
-
- // If requesting closing, put it in the transaction. The protocol might
- // not support it, but in that case, better to fail the transaction,
- // because the user explicitly asked for it, and it's not supported.
- if closeTo != "" {
- closeToAddr, err := basics.UnmarshalChecksumAddress(closeTo)
- if err != nil {
- return transactions.Transaction{}, err
+ optInsForAsset := pps.cinfo.OptIns[aidx]
+
+ var richest *pingPongAccount
+ var richestv uint64
+ var fromAcct *pingPongAccount
+ var toAcct *pingPongAccount
+ for i, addr := range optInsForAsset {
+ acct := pps.accounts[addr]
+ if acct.holdings[aidx] > richestv {
+ richestv = acct.holdings[aidx]
+ richest = acct
+ continue
+ }
+ if (acct.holdings[aidx] > 1000) && (fromAcct == nil || pReplace(i)) {
+ fromAcct = acct
+ continue
+ }
+ if toAcct == nil || pReplace(i) {
+ toAcct = acct
+ continue
}
+ }
+ if richest == nil {
+ err = fmt.Errorf("don't know any account holding asset %d", aidx)
+ return
+ }
+ if fromAcct == nil {
+ fromAcct = richest
+ }
+ if toAcct == nil {
+ toAcct = fromAcct
+ }
+
+ to := toAcct.pk.String()
+ from = fromAcct.pk.String()
+ sender = from
+ if to != from {
+ if toAcct.holdings[aidx] < 1000 && fromAcct.holdings[aidx] > 11000 {
+ amt = 10000
+ }
+ }
+ txn, err = client.MakeUnsignedAssetSendTx(aidx, amt, to, "", "")
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", err)
+ return
+ }
+ txn.Note = noteField[:]
+ txn.Lease = lease
+ txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, fee, txn)
+ if !pps.cfg.Quiet {
+ _, _ = fmt.Fprintf(os.Stdout, "Sending %d asset %d: %s -> %s\n", amt, aidx, sender, to)
+ }
+ update = &assetUpdate{
+ from: from,
+ to: to,
+ aidx: aidx,
+ amt: amt,
+ fee: fee,
+ }
+ return txn, sender, update, err
+}
+
+type appOptInUpdate struct {
+ addr string
+ aidx uint64
+}
+
+func (au *appOptInUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.addr].holdings[au.aidx] = 0
+ pps.cinfo.OptIns[au.aidx] = uniqueAppend(pps.cinfo.OptIns[au.aidx], au.addr)
+}
+
+type nopUpdate struct {
+}
+
+func (au *nopUpdate) apply(pps *WorkerState) {
+}
+
+var nopUpdateSingleton = &nopUpdate{}
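
nopUpdateSingleton is a shared null object: branches with no bookkeeping to do can return it instead of nil, so callers may apply every txnUpdate unconditionally. A sketch of the call-site simplification this allows (hypothetical helper):

	// applyAll needs no nil checks when constructors return nopUpdateSingleton
	// instead of a nil txnUpdate.
	func applyAll(pps *WorkerState, updates []txnUpdate) {
		for _, u := range updates {
			u.apply(pps) // safe even when u is nopUpdateSingleton
		}
	}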
+
+type assetUpdate struct {
+ from string
+ to string
+ aidx uint64
+ amt uint64
+ fee uint64
+}
- tx.PaymentTxnFields.CloseRemainderTo = closeToAddr
+func (au *assetUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= au.fee
+ pps.accounts[au.from].holdings[au.aidx] -= au.amt
+ to := pps.accounts[au.to]
+ if to.holdings == nil {
+ to.holdings = make(map[uint64]uint64)
}
+ to.holdings[au.aidx] += au.amt
+}
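
The nil check before writing to holdings matters because of Go's map semantics: reads from a nil map return the zero value, but writes panic. A two-line illustration:

	var h map[uint64]uint64 // nil map
	_ = h[7]                // fine: reads from a nil map yield the zero value
	// h[7] = 1             // would panic: assignment to entry in nil map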
+
+func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ // select opted-in accounts for Txn.Accounts field
+ var accounts []string
+ aidx := pps.randAppID()
+ if aidx == 0 {
+ err = fmt.Errorf("no known apps")
+ return
+ }
+ appOptIns := pps.cinfo.OptIns[aidx]
+ sender = from
+ if len(appOptIns) > 0 {
+ indices := rand.Perm(len(appOptIns))
+ limit := 5
+ if len(indices) < limit {
+ limit = len(indices)
+ }
+ for i := 0; i < limit; i++ {
+ idx := indices[i]
+ accounts = append(accounts, appOptIns[idx])
+ }
+ if pps.cinfo.AppParams[aidx].Creator == from {
+ // if the application was created by the "from" account, then we don't need to worry about it being opted-in.
+ } else {
+ fromIsOptedIn := false
+ for i := 0; i < len(appOptIns); i++ {
+ if appOptIns[i] == from {
+ fromIsOptedIn = true
+ break
+ }
+ }
+ if !fromIsOptedIn {
+ sender = accounts[0]
+ from = sender
+ }
+ }
+ accounts = accounts[1:]
+ }
+ txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil)
+ if err != nil {
+ return
+ }
+ txn.Note = noteField[:]
+ txn.Lease = lease
+ txn, err = client.FillUnsignedTxTemplate(from, 0, 0, fee, txn)
+ if !pps.cfg.Quiet {
+ _, _ = fmt.Fprintf(os.Stdout, "Calling app %d : %s\n", aidx, from)
+ }
+ update = &appUpdate{
+ from: from,
+ fee: fee,
+ }
+ return txn, sender, update, err
+}
+
+type appUpdate struct {
+ from string
+ fee uint64
+}
- tx.Header.GenesisID = params.GenesisID
+func (au *appUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= au.fee
+}
+
+func (pps *WorkerState) constructNFTGenTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) {
+ var addr string
- // Check if the protocol supports genesis hash
- if cp.SupportGenesisHash {
- copy(tx.Header.GenesisHash[:], params.GenesisHash)
+ var seed [32]byte
+ crypto.RandBytes(seed[:])
+ privateKey := crypto.GenerateSignatureSecrets(seed)
+ publicKey := basics.Address(privateKey.SignatureVerifier)
+
+ pps.accounts[publicKey.String()] = &pingPongAccount{
+ sk: privateKey,
+ pk: publicKey,
+ }
+ addr = publicKey.String()
+
+ fmt.Printf("new NFT holder %s\n", addr)
+ var proto config.ConsensusParams
+ proto, err = getProto(client)
+ if err != nil {
+ return
+ }
+ // enough for the per-ASA min balance, and more than enough for the txns that create them
+ amount := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
+ pps.nftHolders[addr] = 0
+ srcAcct := pps.acct(pps.cfg.SrcAccount)
+ sender = srcAcct.pk.String()
+ txn, err = client.ConstructPayment(sender, addr, fee, amount, noteField, "", [32]byte{}, 0, 0)
+ update = &paymentUpdate{
+ from: sender,
+ to: addr,
+ fee: fee,
+ amt: amount,
+ }
+ return txn, sender, update, err
}
+ // pick a random sender from nft holder sub accounts
+ pick := rand.Intn(len(pps.nftHolders))
+ pos := 0
+ var senderNftCount int
+ for addr, nftCount := range pps.nftHolders {
+ sender = addr
+ senderNftCount = nftCount
+ if pos == pick {
+ break
+ }
+ pos++
- // Default to the suggested fee, if the caller didn't supply it
- // Fee is tricky, should taken care last. We encode the final transaction to get the size post signing and encoding
- // Then, we multiply it by the suggested fee per byte.
- if fee == 0 {
- tx.Fee = basics.MulAIntSaturate(basics.MicroAlgos{Raw: params.Fee}, tx.EstimateEncodedSize())
}
- if tx.Fee.Raw < cp.MinTxnFee {
- tx.Fee.Raw = cp.MinTxnFee
+ var meta [32]byte
+ rand.Read(meta[:])
+ assetName := pps.nftSpamAssetName()
+ const totalSupply = 1
+ txn, err = client.MakeUnsignedAssetCreateTx(totalSupply, false, sender, sender, sender, sender, "ping", assetName, "", meta[:], 0)
+ if err != nil {
+ fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
+ return
+ }
+ txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, fee, txn)
+ if err != nil {
+ fmt.Printf("Cannot fill asset creation txn\n")
+ return
+ }
+ if senderNftCount+1 >= int(pps.cfg.NftAsaPerAccount) {
+ delete(pps.nftHolders, sender)
+ } else {
+ pps.nftHolders[sender] = senderNftCount + 1
+ }
+ update = &nftgenUpdate{
+ from: from,
+ fee: fee,
}
+ return txn, sender, update, err
+}
- return tx, nil
+type nftgenUpdate struct {
+ from string
+ fee uint64
+}
+
+func (au *nftgenUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= au.fee
}
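
The sender pick above selects a uniformly random entry from the nftHolders map by drawing an index with rand.Intn and walking the map to that position; Go's map iteration order is unspecified, which is harmless here because every position is equally likely to be drawn. The same loop as a reusable sketch (hypothetical helper):

	// randEntry returns a uniformly random key/value pair from a non-empty map.
	func randEntry(m map[string]int) (key string, val int) {
		pick := rand.Intn(len(m)) // panics if m is empty; callers must check first
		pos := 0
		for k, v := range m {
			key, val = k, v
			if pos == pick {
				break
			}
			pos++
		}
		return key, val
	}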
func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
@@ -1203,7 +1244,7 @@ func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig
if cfg.Rekey {
stxn, err = txn.Sign(signer.sk), nil
- } else if len(cfg.Program) > 0 {
+ } else if len(cfg.Program) > 0 && rand.Float64() < cfg.ProgramProbability {
// If there's a program, sign it and use that in a lsig
progb := logic.Program(cfg.Program)
psig = signer.sk.Sign(&progb)
@@ -1220,53 +1261,3 @@ func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig
}
return
}
-
-type timeCount struct {
- when time.Time
- count int
-}
-
-type throttler struct {
- times []timeCount
-
- next int
-
- // target x per-second
- xps float64
-
- // rough proportional + integral control
- iterm float64
-}
-
-func newThrottler(windowSize int, targetPerSecond float64) *throttler {
- return &throttler{times: make([]timeCount, windowSize), xps: targetPerSecond, iterm: 0.0}
-}
-
-func (t *throttler) maybeSleep(count int) {
- now := time.Now()
- t.times[t.next].when = now
- t.times[t.next].count = count
- nn := (t.next + 1) % len(t.times)
- t.next = nn
- if t.times[nn].when.IsZero() {
- return
- }
- dt := now.Sub(t.times[nn].when)
- countsum := 0
- for i, tc := range t.times {
- if i != nn {
- countsum += tc.count
- }
- }
- rate := float64(countsum) / dt.Seconds()
- if rate > t.xps {
- // rate too high, slow down
- desiredSeconds := float64(countsum) / t.xps
- extraSeconds := desiredSeconds - dt.Seconds()
- t.iterm += 0.1 * extraSeconds / float64(len(t.times))
- util.NanoSleep(time.Duration(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times))))
-
- } else {
- t.iterm *= 0.95
- }
-}
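
For reference, the throttler deleted here estimated the recent send rate over a fixed ring of (time, count) samples, where `next` always points at the oldest slot, and slept when the rate exceeded the target, with a small integral term absorbing steady-state error. The core window-rate computation, condensed into a sketch without the control logic:

	// windowRate returns events per second over the ring's full window.
	// ring[next] is the oldest sample (the slot about to be overwritten).
	func windowRate(ring []timeCount, next int, now time.Time) float64 {
		oldest := ring[next]
		if oldest.when.IsZero() {
			return 0 // window not yet filled; no estimate available
		}
		countsum := 0
		for i, tc := range ring {
			if i != next {
				countsum += tc.count
			}
		}
		return float64(countsum) / now.Sub(oldest.when).Seconds()
	}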
diff --git a/stateproof/worker_test.go b/stateproof/worker_test.go
index 4c35c8c69..914514117 100644
--- a/stateproof/worker_test.go
+++ b/stateproof/worker_test.go
@@ -21,7 +21,7 @@ import (
"database/sql"
"encoding/binary"
"fmt"
- "io/ioutil"
+ "io"
"strings"
"sync"
"testing"
@@ -573,7 +573,7 @@ func TestSignerDoesntDeleteKeysWhenDBDoesntStoreSigs(t *testing.T) {
dbs, _ := dbOpenTest(t, true)
logger := logging.NewLogger()
- logger.SetOutput(ioutil.Discard)
+ logger.SetOutput(io.Discard)
w := NewWorker(dbs.Wdb, logger, s, s, s, s)
diff --git a/test/commandandcontrol/cc_agent/component/pingPongComponent.go b/test/commandandcontrol/cc_agent/component/pingPongComponent.go
index 7f992e793..5bb4be890 100644
--- a/test/commandandcontrol/cc_agent/component/pingPongComponent.go
+++ b/test/commandandcontrol/cc_agent/component/pingPongComponent.go
@@ -20,7 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "os"
"time"
"github.com/algorand/go-algorand/libgoal"
@@ -101,7 +101,7 @@ func (componentInstance *PingPongComponentInstance) Terminate() (err error) {
func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.PpConfig) (err error) {
// Make a cache dir for wallet handle tokens
- cacheDir, err := ioutil.TempDir(GetHostAgent().TempDir, PINGPONG)
+ cacheDir, err := os.MkdirTemp(GetHostAgent().TempDir, PINGPONG)
if err != nil {
log.Errorf("Cannot make temp dir: %v\n", err)
return
@@ -124,7 +124,7 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.
// Initialize accounts if necessary; this may take several attempts while waiting for previous transactions to settle
for i := 0; i < 10; i++ {
- err = pps.PrepareAccounts(ac)
+ err = pps.PrepareAccounts(&ac)
if err == nil {
break
} else {
@@ -143,7 +143,7 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.
componentInstance.ctx, componentInstance.cancelFunc = context.WithCancel(context.Background())
// Kick off the real processing
- go pps.RunPingPong(componentInstance.ctx, ac)
+ go pps.RunPingPong(componentInstance.ctx, &ac)
return
}
diff --git a/test/commandandcontrol/cc_client/main.go b/test/commandandcontrol/cc_client/main.go
index c45fec1b8..817afb850 100644
--- a/test/commandandcontrol/cc_client/main.go
+++ b/test/commandandcontrol/cc_client/main.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"net/url"
"os"
"os/signal"
@@ -65,7 +64,7 @@ func main() {
os.Exit(1)
}
- options, err := ioutil.ReadFile(*componentOptions)
+ options, err := os.ReadFile(*componentOptions)
if err != nil {
log.Errorf("failed to read options file %s", *componentOptions)
}
diff --git a/test/e2e-go/cli/algod/expect/algod_expect_test.go b/test/e2e-go/cli/algod/expect/algod_expect_test.go
index ec69fc715..06d54b9f3 100644
--- a/test/e2e-go/cli/algod/expect/algod_expect_test.go
+++ b/test/e2e-go/cli/algod/expect/algod_expect_test.go
@@ -19,12 +19,12 @@ import (
"testing"
"github.com/algorand/go-algorand/test/framework/fixtures"
- "github.com/algorand/go-algorand/test/partitiontest"
)
// TestAlgodWithExpect Process all expect script files with suffix Test.exp within the test/e2e-go/cli/algod/expect directory
func TestAlgodWithExpect(t *testing.T) {
- partitiontest.PartitionTest(t)
+ // partitiontest.PartitionTest(t)
+ // Causes double partition, so commented out on purpose
defer fixtures.ShutdownSynchronizedTest(t)
et := fixtures.MakeExpectTest(t)
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
index be6fc0ab3..d26074e2f 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
@@ -37,7 +37,7 @@ proc spawnCatchpointCatchupWebProxy { TARGET_ENDPOINT RUNTIME REQUEST_DELAY } {
eof { ::AlgorandGoal::CheckEOF "web proxy failed to start"}
}
- puts "Web proxy listening address is $WEBPROXY_LISTEN_ADDRESS"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Web proxy listening address is $WEBPROXY_LISTEN_ADDRESS"
return $WEBPROXY_LISTEN_ADDRESS
}
@@ -105,7 +105,7 @@ if { [catch {
exit 1
}
- puts "Primary node listening address is $PRIMARY_LISTEN_ADDRESS"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Primary node listening address is $PRIMARY_LISTEN_ADDRESS"
# start the web proxy
set WP_SPAWN_ID 0
@@ -120,11 +120,11 @@ if { [catch {
set CATCHPOINT [::AlgorandGoal::GetNodeLastCatchpoint $TEST_ROOT_DIR/Primary]
- puts "Catchpoint is $CATCHPOINT"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Catchpoint is $CATCHPOINT"
regexp -nocase {([0-9]*)#[A-Z2-7]*} $CATCHPOINT CATCHPOINT_ROUND CATCHPOINT_ROUND
- puts "Catchpoint round is $CATCHPOINT_ROUND"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Catchpoint round is $CATCHPOINT_ROUND"
# wait for the primary to reach $CATCHPOINT_ROUND + 5, so that the catchpoint file will have been saved
::AlgorandGoal::WaitForRound [expr {int($CATCHPOINT_ROUND + 5)}] $TEST_ROOT_DIR/Primary
@@ -138,7 +138,7 @@ if { [catch {
# close the web proxy
close -i $WP_SPAWN_ID
- puts "catchpointCatchupTest basic test completed"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: catchpointCatchupTest basic test completed"
} EXCEPTION ] } {
::AlgorandGoal::Abort "ERROR in catchpointCatchupTest - basic test: $EXCEPTION"
@@ -191,7 +191,7 @@ if { [catch {
::AlgorandGoal::StopNode $TEST_ROOT_DIR/Primary
- puts "catchpointCatchupTest stop/start test completed"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: catchpointCatchupTest stop/start test completed"
} EXCEPTION ] } {
::AlgorandGoal::Abort "ERROR in catchpointCatchupTest - stop/start: $EXCEPTION"
}
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go b/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
index 8c574c4b1..3ba476438 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
@@ -58,7 +58,7 @@ func main() {
// prevent requests for block #2 to go through.
if strings.HasSuffix(request.URL.String(), "/block/2") {
response.WriteHeader(http.StatusBadRequest)
- response.Write([]byte("webProxy prevents block 2 from serving"))
+ response.Write([]byte("webProxy prevents block 2 from serving")) //nolint:errcheck // don't care
return
}
if *webProxyLogFile != "" {
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index f528dabb1..2f2f4f826 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -44,48 +44,54 @@ package require Tcl 8.0
# Utility method to abort out of this script
proc ::AlgorandGoal::Abort { ERROR } {
- puts "Aborting with Error: $ERROR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Aborting with Error: $ERROR"
+ set LOGS_COLLECTED 0
if { [info exists ::GLOBAL_TEST_ROOT_DIR] } {
# terminate child algod processes, if there are active child processes the test will hang on a test failure
puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR"
puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"
+ ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
+
log_user 1
set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Primary
- if { [info exists ::NODE_DATA_DIR] } {
+ if { [file exists $NODE_DATA_DIR] } {
set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
- puts "$NODE_DATA_DIR/algod-out.log :\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/algod-out.log:\r\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
- puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ puts "\n$NODE_DATA_DIR/algod-err.log:\r\n$errLog"
+ set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log]
+ puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog"
+ set LOGS_COLLECTED 1
}
set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Node
- if { [info exists ::NODE_DATA_DIR] } {
+ puts "Node path $NODE_DATA_DIR"
+ if { [file exists $NODE_DATA_DIR] } {
set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
- puts "$NODE_DATA_DIR/algod-out.log :\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/algod-out.log:\r\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
- puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ puts "\n$NODE_DATA_DIR/algod-err.log:\r\n$errLog"
+ set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log]
+ puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog"
+ set LOGS_COLLECTED 1
}
-
- ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
}
if { [info exists ::GLOBAL_TEST_ALGO_DIR] } {
puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
- log_user 1
- set outLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-out.log]
- puts "$::GLOBAL_TEST_ALGO_DIR/algod-out.log :\r\n$outLog"
- set errLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $::GLOBAL_TEST_ALGO_DIR/node.log]
- puts "$::GLOBAL_TEST_ALGO_DIR/node.log :\r\n$nodeLog"
-
::AlgorandGoal::StopNode $::GLOBAL_TEST_ALGO_DIR
+
+ if { $LOGS_COLLECTED == 0 } {
+ log_user 1
+ set outLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-out.log]
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-out.log:\r\n$outLog"
+ set errLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-err.log]
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-err.log:\r\n$errLog"
+ set nodeLog [exec -- tail -n 50 $::GLOBAL_TEST_ALGO_DIR/node.log]
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/node.log:\r\n$nodeLog"
+ }
}
exit 1
@@ -137,7 +143,7 @@ proc ::AlgorandGoal::StartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED "False"} {PEER_A
set GOAL_PARAMS "$GOAL_PARAMS -p $PEER_ADDRESS"
}
if { [catch {
- puts "node start with $TEST_ALGO_DIR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node start with $TEST_ALGO_DIR"
spawn goal {*}$GOAL_PARAMS
if { $SYSTEMD_MANAGED eq "True" } {
expect {
@@ -164,7 +170,7 @@ proc ::AlgorandGoal::StopNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
set timeout 15
if { [catch {
- puts "node stop with $TEST_ALGO_DIR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node stop with $TEST_ALGO_DIR"
if { $SYSTEMD_MANAGED eq "" } {
spawn goal node stop -d $TEST_ALGO_DIR
expect {
@@ -192,7 +198,7 @@ proc ::AlgorandGoal::RestartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
set timeout 30
if { [catch {
- puts "node restart with $TEST_ALGO_DIR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node restart with $TEST_ALGO_DIR"
if { $SYSTEMD_MANAGED eq "" } {
spawn goal node restart -d $TEST_ALGO_DIR
expect {
@@ -241,7 +247,7 @@ proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ROOT_DIR
if { [catch {
# Start network
- puts "network start $NETWORK_NAME"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: network start $NETWORK_NAME"
spawn goal network start -r $TEST_ROOT_DIR
expect {
timeout { close; ::AlgorandGoal::Abort "Timed out starting network" }
@@ -272,7 +278,7 @@ proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ROOT_DIR
proc ::AlgorandGoal::StopNetwork { NETWORK_NAME TEST_ROOT_DIR } {
set timeout 60
set NETWORK_STOP_MESSAGE ""
- puts "Stopping network: $NETWORK_NAME"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Stopping network: $NETWORK_NAME"
spawn goal network stop -r $TEST_ROOT_DIR
expect {
timeout {
@@ -896,7 +902,7 @@ proc ::AlgorandGoal::GetNodeLastCommittedBlock { NODE_DATA_DIR } {
proc ::AlgorandGoal::StartCatchup { NODE_DATA_DIR CATCHPOINT } {
if { [catch {
# start catchup
- puts "spawn node catchup $CATCHPOINT"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: spawn node catchup $CATCHPOINT"
spawn goal node catchup $CATCHPOINT -d $NODE_DATA_DIR
expect {
timeout { ::AlgorandGoal::Abort "goal node catchup timed out" }
@@ -914,7 +920,7 @@ proc ::AlgorandGoal::WaitCatchup { TEST_PRIMARY_NODE_DIR WAIT_DURATION_SEC } {
set i 0
while { $i < $WAIT_DURATION_SEC } {
# Check node status
- puts "spawn node status "
+ puts "[clock format [clock seconds] -format %H:%M:%S]: spawn node status "
spawn goal node status -d $TEST_PRIMARY_NODE_DIR
expect {
timeout { ::AlgorandGoal::Abort "goal node status timed out" }
@@ -967,22 +973,22 @@ proc ::AlgorandGoal::WaitForRound { WAIT_FOR_ROUND_NUMBER NODE_DATA_DIR } {
eof {
catch wait result;
if { [lindex $result 3] != 0 } {
- ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"
+ ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3], output: $expect_out(buffer)"
}
}
}
log_user 1
if { $BLOCK > -1 } {
- puts "node status check complete, current round is $BLOCK"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node status check complete, current round is $BLOCK"
} else {
::AlgorandGoal::Abort "failed to retrieve block round number"
}
# Check if the round number is reached
if { $BLOCK >= $WAIT_FOR_ROUND_NUMBER } {
- puts "Reached Round number: $WAIT_FOR_ROUND_NUMBER"; break
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Reached Round number: $WAIT_FOR_ROUND_NUMBER"; break
} else {
- puts "Current Round: '$BLOCK' is less than wait for round: '$WAIT_FOR_ROUND_NUMBER'"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Current Round: '$BLOCK' is less than wait for round: '$WAIT_FOR_ROUND_NUMBER'"
if { $LAST_ROUND >= $BLOCK } {
# no progress was made since last time we checked.
incr SLEEP_TIME
diff --git a/test/e2e-go/cli/goal/expect/goalFormattingTest.exp b/test/e2e-go/cli/goal/expect/goalFormattingTest.exp
index 054406479..cfa0af63f 100644
--- a/test/e2e-go/cli/goal/expect/goalFormattingTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalFormattingTest.exp
@@ -26,7 +26,7 @@ if { [catch {
set NON_PRINTABLE_CHARS_WARNING 1
exp_continue
}
- {Cannot decode transactions from *: msgpack decode error \[pos 33\]: no matching struct field found when decoding stream map with key \[0G\[0K\[33munexpected_key\[0m} {
+ {Cannot decode transactions from *: Unknown field: \[0G\[0K\[33munexpected_key\[0m} {
set CANNOT_DECODE_MESSAGE 1
exp_continue
}
diff --git a/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp b/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
deleted file mode 100644
index 6414f05f4..000000000
--- a/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/expect -f
-#exp_internal 1
-set err 0
-log_user 1
-
-source goalExpectCommon.exp
-
-set TEST_ALGO_DIR [lindex $argv 0]
-set TEST_DATA_DIR [lindex $argv 1]
-
-proc statefulTealAppInfoTest { TEST_ALGO_DIR TEST_DATA_DIR} {
-
- set timeout 60
- set TIME_STAMP [clock seconds]
-
- set TEST_ROOT_DIR $TEST_ALGO_DIR/root_$TIME_STAMP
- set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/
- set NETWORK_NAME test_net_expect_$TIME_STAMP
- set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json"
-
- exec cp $TEST_DATA_DIR/../../installer/genesis/devnet/genesis.json $TEST_ALGO_DIR
-
- # Create network
- ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
-
- # Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
-
- set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
- puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
-
- set PRIMARY_WALLET_NAME unencrypted-default-wallet
-
- # Determine primary account
- set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR]
-
- # Check the balance of the primary account
- set PRIMARY_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR]
- puts "Primary Account Balance: $PRIMARY_ACCOUNT_BALANCE"
-
- ::AlgorandGoal::WaitForRound 1 $TEST_PRIMARY_NODE_DIR
-
- set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs"
-
- # Network Setup complete
- #----------------------
-
- puts "calling app compile"
- ::AlgorandGoal::AppCompile ${TEAL_PROGS_DIR}/upgraded.teal ${TEST_ROOT_DIR}/upgraded.tealc $TEST_PRIMARY_NODE_DIR
- puts "computing target hash"
-
- puts "compute target hash"
- set TARGET_HASH [ exec shasum -a 256 "${TEST_ROOT_DIR}/upgraded.tealc" | awk {{print $1}} ]
- puts "TARGET_HASH ${TARGET_HASH}"
-
- # Compile dummy, wrong contract
- ::AlgorandGoal::AppCompile ${TEAL_PROGS_DIR}/wrongupgrade.teal ${TEST_ROOT_DIR}/wrongupgrade.tealc $TEST_PRIMARY_NODE_DIR
-
- # Copy template
- exec cp ${TEAL_PROGS_DIR}/bootloader.teal.tmpl ${TEST_ROOT_DIR}/bootloader.teal
-
- # Substitute template values
- exec sed -i"" -e "s/TMPL_APPROV_HASH/${TARGET_HASH}/g" ${TEST_ROOT_DIR}/bootloader.teal
- exec sed -i"" -e "s/TMPL_CLEARSTATE_HASH/${TARGET_HASH}/g" ${TEST_ROOT_DIR}/bootloader.teal
-
- # Create an app using filled-in bootloader template
- puts "calling app create"
- set GLOBAL_BYTE_SLICES 1
- set LOCAL_BYTE_SLICES 0
- set APP_ID [::AlgorandGoal::AppCreate0 $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS ${TEST_ROOT_DIR}/bootloader.teal $GLOBAL_BYTE_SLICES $LOCAL_BYTE_SLICES ${TEAL_PROGS_DIR}/clear_program_state.teal $TEST_PRIMARY_NODE_DIR]
-
- # Application setup complete
- #----------------------
-
- # Calling app as an update but with right scripts should succeed
- spawn goal app info --app-id $APP_ID -d $TEST_PRIMARY_NODE_DIR
- expect {
- timeout { puts timeout; ::AlgorandGoal::Abort "\n Failed to see expected output" }
- -re {^Application ID:\s+(\d+)\r\n} {set APP_INFO_ID $expect_out(1,string) ; exp_continue }
- -re {Creator:\s+([A-Z0-9]+)\r\n} {set APP_INFO_CREATOR $expect_out(1,string) ; exp_continue }
- -re {Approval hash:\s+([A-Z0-9]+)\r\n} {set APP_INFO_APPROVAL_HASH $expect_out(1,string); exp_continue }
- -re {Clear hash:\s+([A-Z0-9]+)\r\n} {set APP_INFO_CLEAR_HASH $expect_out(1,string); exp_continue }
- -re {Max global byteslices:\s+(\d+)\r\n} {set APP_INFO_GLOBAL_BYTESLICES $expect_out(1,string); exp_continue }
- -re {Max global integers:\s+(\d+)\r\n} {set APP_INFO_GLOBAL_INTEGERS $expect_out(1,string) ; exp_continue }
- -re {Max local byteslices:\s+(\d+)\r\n} {set APP_INFO_LOCAL_BYTESLICES $expect_out(1,string) ; exp_continue }
- -re {Max local integers:\s+(\d+)\r\n} {set APP_INFO_LOCAL_INTEGERS $expect_out(1,string) ; close }
- eof {close; ::AlgorandGoal::Abort "app update failed" }
- }
- puts "APP_INFO_ID $APP_INFO_ID"
- puts "APP_INFO_CREATOR $APP_INFO_CREATOR"
- puts "APP_INFO_APPROVAL_HASH $APP_INFO_APPROVAL_HASH"
- puts "APP_INFO_CLEAR_HASH $APP_INFO_CLEAR_HASH"
- puts "APP_INFO_GLOBAL_BYTESLICES $APP_INFO_GLOBAL_BYTESLICES"
- puts "APP_INFO_GLOBAL_INTEGERS $APP_INFO_GLOBAL_INTEGERS"
- puts "APP_INFO_LOCAL_BYTESLICES $APP_INFO_LOCAL_BYTESLICES"
- puts "APP_INFO_LOCAL_INTEGERS $APP_INFO_LOCAL_INTEGERS"
-
- set errors 0
- if { $APP_INFO_ID != $APP_ID } {
- puts "error APP_INFO_ID $APP_INFO_ID does not match expected $APP_ID" ; incr errors
- }
- if { $APP_INFO_CREATOR != $PRIMARY_ACCOUNT_ADDRESS } {
- puts "error APP_INFO_CREATOR $APP_INFO_CREATOR does not match expected $PRIMARY_ACCOUNT_ADDRESS" ; incr errors
- }
- set EXPECTED_APP_INFO_APPROVAL_HASH "AJM7G3WXKKL6YTITFNRYT53HRFKHKWGTEZF6UZXKSUNO6GI7FOBCA7LDTU"
- if { $APP_INFO_APPROVAL_HASH != "AJM7G3WXKKL6YTITFNRYT53HRFKHKWGTEZF6UZXKSUNO6GI7FOBCA7LDTU" } {
- puts "error APP_INFO_APPROVAL_HASH $APP_INFO_APPROVAL_HASH does not match expected $EXPECTED_APP_INFO_APPROVAL_HASH" ; incr errors
- }
- set EXPECTED_APP_INFO_CLEAR_HASH "YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA"
- if { $APP_INFO_CLEAR_HASH != $EXPECTED_APP_INFO_CLEAR_HASH } {
- puts "error APP_INFO_CLEAR_HASH $APP_INFO_CLEAR_HASH does not match expected $EXPECTED_APP_INFO_CLEAR_HASH" ; incr errors
- }
- set EXPECTED_APP_INFO_GLOBAL_BYTESLICES 1
- if { $APP_INFO_GLOBAL_BYTESLICES != $EXPECTED_APP_INFO_GLOBAL_BYTESLICES } {
- puts "error APP_INFO_GLOBAL_BYTESLICES $APP_INFO_GLOBAL_BYTESLICES does not match expected $EXPECTED_APP_INFO_GLOBAL_BYTESLICES" ; incr errors
- }
- set EXPECTED_APP_INFO_GLOBAL_INTEGERS 0
- if { $APP_INFO_GLOBAL_INTEGERS != $EXPECTED_APP_INFO_GLOBAL_INTEGERS } {
- puts "error APP_INFO_GLOBAL_INTEGERS $APP_INFO_GLOBAL_INTEGERS does not match expected $EXPECTED_APP_INFO_GLOBAL_INTEGERS" ; incr errors
- }
- set EXPECTED_APP_INFO_LOCAL_BYTESLICES 0
- if { $APP_INFO_LOCAL_BYTESLICES != $EXPECTED_APP_INFO_LOCAL_BYTESLICES } {
- puts "error APP_INFO_LOCAL_BYTESLICES $APP_INFO_LOCAL_BYTESLICES does not match expected $EXPECTED_APP_INFO_LOCAL_BYTESLICES" ; incr errors
- }
- set EXPECTED_APP_INFO_LOCAL_INTEGERS 0
- if { $APP_INFO_LOCAL_INTEGERS != $EXPECTED_APP_INFO_LOCAL_INTEGERS } {
- puts "error APP_INFO_LOCAL_INTEGERS $APP_INFO_LOCAL_INTEGERS does not match expected $EXPECTED_APP_INFO_LOCAL_INTEGERS" ; incr errors
- }
-
- if { $errors > 0 } {
- puts "there were a total of $errors"
- ::AlgorandGoal::Abort "ERROR in statefulTealAppInfoTest"
- } else {
- puts "app info test was successful"
- }
-
- # Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
-
- puts "Goal statefulTealAppInfoTest Successful"
-
-}
-
-
-if { [catch {
- source goalExpectCommon.exp
-
- puts "starting statefulTealAppInfoTest"
-
- puts "TEST_ALGO_DIR: $TEST_ALGO_DIR"
- puts "TEST_DATA_DIR: $TEST_DATA_DIR"
-
- statefulTealAppInfoTest $TEST_ALGO_DIR $TEST_DATA_DIR
-
- exit 0
-
-} EXCEPTION ] } {
- ::AlgorandGoal::Abort "ERROR in statefulTealAppInfoTest: $EXCEPTION"
-}
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index a7dd6452e..3c8a29371 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -347,8 +347,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(err)
minFee, minBal, err := fixture.MinFeeAndBalance(curStatus.LastRound)
r.NoError(err)
- deadline := curStatus.LastRound + uint64(5)
- fixture.SendMoneyAndWait(deadline, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
+ fixture.SendMoneyAndWait(curStatus.LastRound, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
blk, err := client.Block(curStatus.LastRound)
r.NoError(err)
@@ -373,8 +372,7 @@ func TestRewardRateRecalculation(t *testing.T) {
curStatus, err = client.Status()
r.NoError(err)
- deadline = curStatus.LastRound + uint64(5)
- fixture.SendMoneyAndWait(deadline, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
+ fixture.SendMoneyAndWait(curStatus.LastRound, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
rewardRecalcRound = rewardRecalcRound + consensusParams.RewardsRateRefreshInterval
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
index 456c7b3cb..a3b72180f 100644
--- a/test/e2e-go/features/stateproofs/stateproofs_test.go
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -19,7 +19,6 @@ package stateproofs
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -641,7 +640,7 @@ func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
// installParticipationKey generates a new key for a given account and installs it with the client.
func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
- dir, err := ioutil.TempDir("", "temporary_partkey_dir")
+ dir, err := os.MkdirTemp("", "temporary_partkey_dir")
require.NoError(t, err)
defer os.RemoveAll(dir)
@@ -676,14 +675,10 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco
// After making the first Stateproof, we transfer three-quarters of the stake of the
// rich node to the poor node. For both cases, we assert different stakes, that is, to
// conclude whether the poor node is used to create the StateProof or the rich node.
-func TestAttestorsChangeTest(t *testing.T) {
+func TestAttestorsChange(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- t.Skip("This test is difficult for ARM")
- }
-
a := require.New(fixtures.SynchronizedTest(t))
consensusParams := getDefaultStateProofConsensusParams()
@@ -718,7 +713,7 @@ func TestAttestorsChangeTest(t *testing.T) {
from: accountFetcher{nodeName: "richNode", accountNumber: 0},
to: accountFetcher{nodeName: "poorNode", accountNumber: 0},
}
-
+ sum := uint64(0)
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
// Changing the amount to pay. This should transfer most of the money from the rich node to the poor node.
if consensusParams.StateProofInterval*2 == rnd {
@@ -739,15 +734,10 @@ func TestAttestorsChangeTest(t *testing.T) {
blk, err := libgoal.BookkeepingBlock(rnd)
a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
- if (rnd % consensusParams.StateProofInterval) == 0 {
- // Must have a merkle commitment for participants
- a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
- a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
-
- stake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
-
+ // We sample the accounts' balances StateProofVotersLookback rounds before the state proof round.
+ if (rnd+consensusParams.StateProofVotersLookback)%consensusParams.StateProofInterval == 0 {
+ sum = 0
// the main part of the test (computing the total stake of the nodes):
- sum := uint64(0)
for i := 1; i <= 3; i++ {
sum += accountFetcher{fmt.Sprintf("Node%d", i), 0}.getBalance(a, &fixture)
}
@@ -755,6 +745,14 @@ func TestAttestorsChangeTest(t *testing.T) {
richNodeStake := accountFetcher{"richNode", 0}.getBalance(a, &fixture)
poorNodeStake := accountFetcher{"poorNode", 0}.getBalance(a, &fixture)
sum = sum + richNodeStake + poorNodeStake
+ }
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ stake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
a.Equal(sum, stake)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index 520f8af0e..9616f3cb0 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -21,6 +21,7 @@ import (
"path/filepath"
"strings"
"testing"
+ "time"
"github.com/stretchr/testify/require"
@@ -970,6 +971,13 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
verifyAssetParameters(asset, "test", "testunit", manager, reserve, freeze, clawback,
assetMetadataHash, assetURL, a)
+ // Ensure manager is funded before submitting any transactions
+ currentRound, err := client.CurrentRound()
+ a.NoError(err)
+
+ err = fixture.WaitForAccountFunded(currentRound+5, manager)
+ a.NoError(err)
+
// Destroy the asset
tx, err := client.MakeUnsignedAssetDestroyTx(assetIndex)
a.NoError(err)
@@ -1009,6 +1017,8 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
consensusParams.SeedLookback = 2
consensusParams.SeedRefreshInterval = 8
consensusParams.MaxBalLookback = 2 * consensusParams.SeedLookback * consensusParams.SeedRefreshInterval // 32
+ consensusParams.AgreementFilterTimeoutPeriod0 = 400 * time.Millisecond
+ consensusParams.AgreementFilterTimeout = 400 * time.Millisecond
configurableConsensus[consensusVersion] = consensusParams
@@ -1059,6 +1069,13 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
verifyAssetParameters(asset, "test", "testunit", manager, reserve, freeze, clawback,
assetMetadataHash, assetURL, a)
+ // Ensure manager is funded before submitting any transactions
+ currentRound, err := client.CurrentRound()
+ a.NoError(err)
+
+ err = fixture.WaitForAccountFunded(currentRound+5, manager)
+ a.NoError(err)
+
// Destroy the asset
tx, err := client.MakeUnsignedAssetDestroyTx(assetIndex)
a.NoError(err)
@@ -1169,8 +1186,8 @@ func verifyAssetParameters(asset v1.AssetParams,
unitName, assetName, manager, reserve, freeze, clawback string,
metadataHash []byte, assetURL string, asser *require.Assertions) {
- asser.Equal(asset.UnitName, "test")
- asser.Equal(asset.AssetName, "testunit")
+ asser.Equal(asset.UnitName, unitName)
+ asser.Equal(asset.AssetName, assetName)
asser.Equal(asset.ManagerAddr, manager)
asser.Equal(asset.ReserveAddr, reserve)
asser.Equal(asset.FreezeAddr, freeze)
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
index 3b43606d6..be8f27e76 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
@@ -440,7 +440,6 @@ func TestMultisigSignProgram(t *testing.T) {
err = protocol.Decode(resp3.Multisig, &msig)
a.NoError(err)
- ok, err := crypto.MultisigVerify(logic.Program(program), crypto.Digest(msigAddr), msig)
+ err = crypto.MultisigVerify(logic.Program(program), crypto.Digest(msigAddr), msig)
a.NoError(err)
- a.True(ok)
}
diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go
index 4988b79c4..56f5b6408 100644
--- a/test/e2e-go/upgrades/rekey_support_test.go
+++ b/test/e2e-go/upgrades/rekey_support_test.go
@@ -128,10 +128,10 @@ func TestRekeyUpgrade(t *testing.T) {
_, err = client.BroadcastTransaction(rekeyed)
// non-empty err means the upgrade has not happened yet (as expected); check the error
if err != nil {
- // should be either "nonempty AuthAddr but rekeying not supported" or "txn dead"
- if !strings.Contains(err.Error(), "nonempty AuthAddr but rekeying not supported") &&
+ // should be either "nonempty AuthAddr but rekeying is not supported" or "txn dead"
+ if !strings.Contains(err.Error(), "nonempty AuthAddr but rekeying is not supported") &&
!strings.Contains(err.Error(), "txn dead") {
- a.NoErrorf(err, "error message should be one of :\n%s\n%s", "nonempty AuthAddr but rekeying not supported", "txn dead")
+ a.NoErrorf(err, "error message should be one of :\n%s\n%s", "nonempty AuthAddr but rekeying is not supported", "txn dead")
}
} else {
// if we had no error it must mean that we've upgraded already. Verify that.
diff --git a/test/framework/fixtures/baseFixture.go b/test/framework/fixtures/baseFixture.go
index 37086e536..a2205d3c6 100644
--- a/test/framework/fixtures/baseFixture.go
+++ b/test/framework/fixtures/baseFixture.go
@@ -18,7 +18,6 @@ package fixtures
import (
"fmt"
- "io/ioutil"
"os"
"path"
"runtime"
@@ -55,7 +54,7 @@ func (f *baseFixture) initialize(instance Fixture) {
}
f.testDir = os.Getenv("TESTDIR")
if f.testDir == "" {
- f.testDir, _ = ioutil.TempDir("", "tmp")
+ f.testDir, _ = os.MkdirTemp("", "tmp")
f.testDirTmp = true
}
f.testDataDir = os.Getenv("TESTDATADIR")
diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go
index 3d7293d40..35b748968 100644
--- a/test/framework/fixtures/expectFixture.go
+++ b/test/framework/fixtures/expectFixture.go
@@ -18,7 +18,6 @@ package fixtures
import (
"bytes"
- "fmt"
"os"
"os/exec"
"path"
@@ -148,7 +147,7 @@ func (ef *ExpectFixture) Run() {
if match, _ := regexp.MatchString(ef.testFilter, testName); match {
ef.t.Run(testName, func(t *testing.T) {
if reason, ok := disabledTest[testName]; ok {
- t.Skip(fmt.Sprintf("Skipping %s test: %s", testName, reason))
+ t.Skipf("Skipping %s test: %s", testName, reason)
}
partitiontest.PartitionTest(t) // Check if this expect test should by run, may SKIP
diff --git a/test/framework/fixtures/kmdFixture.go b/test/framework/fixtures/kmdFixture.go
index 75a357f2a..db4794d3d 100644
--- a/test/framework/fixtures/kmdFixture.go
+++ b/test/framework/fixtures/kmdFixture.go
@@ -17,7 +17,6 @@
package fixtures
import (
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -133,14 +132,14 @@ func (f *KMDFixture) SetupWithConfig(t TestingTB, config string) {
// Write a token
f.APIToken = defaultAPIToken
tokenFilepath := filepath.Join(f.kmdDir, "kmd.token")
- err := ioutil.WriteFile(tokenFilepath, f.APIToken, 0640)
+ err := os.WriteFile(tokenFilepath, f.APIToken, 0640)
require.NoError(f.t, err)
if config == "" {
config = defaultConfig
}
configFilepath := filepath.Join(f.kmdDir, "kmd_config.json")
- err = ioutil.WriteFile(configFilepath, []byte(config), 0640)
+ err = os.WriteFile(configFilepath, []byte(config), 0640)
require.NoError(f.t, err)
// Start kmd
@@ -197,7 +196,7 @@ func (f *KMDFixture) MakeWalletAndHandleToken() (handleToken string, err error)
func (f *KMDFixture) TestConfig(cfg []byte) error {
// Write the passed config
configFilepath := filepath.Join(f.kmdDir, "kmd_config.json")
- err := ioutil.WriteFile(configFilepath, cfg, 0640)
+ err := os.WriteFile(configFilepath, cfg, 0640)
if err != nil {
return err
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index af84e4d2e..746a0c2f9 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -17,8 +17,8 @@
package fixtures
import (
+ "bufio"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -135,7 +135,7 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) {
}
keyDir := filepath.Join(dataDir, genID)
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return
}
@@ -311,6 +311,10 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) {
f.NC.StopKMD()
if preserveData {
f.network.Stop(f.binDir)
+ f.dumpLogs(filepath.Join(f.PrimaryDataDir(), "node.log"))
+ for _, nodeDir := range f.NodeDataDirs() {
+ f.dumpLogs(filepath.Join(nodeDir, "node.log"))
+ }
} else {
f.network.Delete(f.binDir)
@@ -324,6 +328,24 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) {
}
}
+// dumpLogs prints out log files for the running nodes
+func (f *LibGoalFixture) dumpLogs(filePath string) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ f.t.Logf("could not open %s", filePath)
+ return
+ }
+ defer file.Close()
+
+ f.t.Log("=================================\n")
+ parts := strings.Split(filePath, "/")
+ f.t.Logf("%s/%s:", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ f.t.Log(scanner.Text())
+ }
+}
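
One caveat in dumpLogs: bufio.Scanner's default token limit is 64 KiB, so an unusually long node.log line would stop the scan early with bufio.ErrTooLong. If that ever matters, the buffer can be enlarged before scanning (a sketch, not part of this change):

	scanner := bufio.NewScanner(file)
	scanner.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // raise the per-line cap to 1 MiB
	for scanner.Scan() {
		f.t.Log(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		f.t.Logf("error scanning %s: %v", filePath, err)
	}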
+
// intercept baseFixture.failOnError so we can clean up any algods that are still alive
func (f *LibGoalFixture) failOnError(err error, message string) {
if err != nil {
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index 7265c560c..c9f3befa8 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -18,6 +18,7 @@ package fixtures
import (
"fmt"
+ "github.com/algorand/go-algorand/data/basics"
"sort"
"time"
"unicode"
@@ -25,7 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/daemon/algod/api/client"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/test/e2e-go/globals"
@@ -265,12 +266,56 @@ func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAn
for txid, addr := range txidsAndAddresses {
_, err := f.WaitForConfirmedTxn(roundTimeout, addr, txid)
if err != nil {
+ f.t.Logf("txn failed to confirm: %s %s", addr, txid)
+ pendingTxns, err := f.AlgodClient.GetPendingTransactions(0)
+ if err == nil {
+ pendingTxids := make([]string, 0, pendingTxns.TotalTxns)
+ for _, txn := range pendingTxns.TruncatedTxns.Transactions {
+ pendingTxids = append(pendingTxids, txn.TxID)
+ }
+ f.t.Logf("pending txids: %v", pendingTxids)
+ } else {
+ f.t.Logf("unable to log pending txns: %v", err)
+ }
+ allTxids := make([]string, 0, len(txidsAndAddresses))
+ for txID := range txidsAndAddresses {
+ allTxids = append(allTxids, txID)
+ }
+ f.t.Logf("all txids: %v", allTxids)
return false
}
}
return true
}
+// WaitForAccountFunded waits until the given account has a non-zero balance,
+// until the given roundTimeout round passes,
+// or until waiting for the next round times out
+func (f *RestClientFixture) WaitForAccountFunded(roundTimeout uint64, accountAddress string) (err error) {
+ client := f.AlgodClient
+ for {
+ // Get current round information
+ curStatus, statusErr := client.Status()
+ require.NoError(f.t, statusErr, "fixture should be able to get node status")
+ curRound := curStatus.LastRound
+
+ // Check whether the account has been funded yet
+ acct, acctErr := client.AccountInformation(accountAddress)
+ require.NoError(f.t, acctErr, "fixture should be able to get account info")
+ if acct.Amount > 0 {
+ return nil
+ }
+
+ // Check if we should wait a round
+ if curRound > roundTimeout {
+ return fmt.Errorf("failed to see confirmed transaction by round %v", roundTimeout)
+ }
+ // Wait a round
+ err = f.WaitForRoundWithTimeout(curRound + 1)
+ require.NoError(f.t, err, "fixture should be able to wait for one round to pass")
+ }
+}
+
// SendMoneyAndWait uses the rest client to send money and WaitForTxnConfirmation to wait for the send to confirm
// it adds some extra error checking as well
func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v1.Transaction) {
@@ -284,7 +329,8 @@ func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transaction
// SendMoneyAndWaitFromWallet is as above, but for a specific wallet
func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassword []byte, curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v1.Transaction) {
client := f.LibGoalClient
- fundingTx, err := client.SendPaymentFromWallet(walletHandle, walletPassword, fromAccount, toAccount, transactionFee, amountToSend, nil, closeToAccount, 0, 0)
+ // use curRound - 1 as the transaction's first valid round in case other nodes are behind
+ fundingTx, err := client.SendPaymentFromWallet(walletHandle, walletPassword, fromAccount, toAccount, transactionFee, amountToSend, nil, closeToAccount, basics.Round(curRound).SubSaturate(1), 0)
require.NoError(f.t, err, "client should be able to send money from rich to poor account")
require.NotEmpty(f.t, fundingTx.ID().String(), "transaction ID should not be empty")
waitingDeadline := curRound + uint64(5)
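For test clients outside the Go fixture, the same poll-until-funded pattern can be sketched with py-algorand-sdk. This is a minimal sketch, not fixture code: `wait_for_account_funded` is a hypothetical helper, and `algod` is assumed to be an already-constructed v2 AlgodClient.

    import time

    def wait_for_account_funded(algod, address, round_timeout):
        """Poll until `address` has a non-zero balance or `round_timeout` passes."""
        while True:
            cur_round = algod.status()['last-round']
            if algod.account_info(address).get('amount', 0) > 0:
                return
            if cur_round > round_timeout:
                raise TimeoutError('account %s not funded by round %d' % (address, round_timeout))
            # block until the next round is committed, mirroring WaitForRoundWithTimeout
            algod.status_after_block(cur_round)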
diff --git a/test/heapwatch/README.md b/test/heapwatch/README.md
index 27cb54d31..04c0568be 100644
--- a/test/heapwatch/README.md
+++ b/test/heapwatch/README.md
@@ -1,14 +1,54 @@
# Heap Watch
-Tools for checking if algod has memory leaks.
+Collect RAM, bandwidth, and other stats over the course of a test cluster run.
-Run a local private network of three nodes and two pingpongs.
+Produce reports and plots from the collected data.
-Periodically sample pprof memory profiles.
+## Scripts
-Watch memory usage from `ps` and write to a CSV file for each algod.
+* heapWatch.py
+ * Collect data from algod
+ * Heap profiles, /metrics, CPU profiles, block headers, goroutine profiles
+ * Capture from a local algod by data dir, or from a cluster via terraform-inventory.host
+ * Convert profiles to SVG or other reports
-# Usage
+* block_history.py
+ * Capture block headers every round from a running `algod` (see the reader sketch after this list)
+
+* block_history_relays.py
+ * Capture block headers every round from one or more running `algod` instances
+ * Talk to a set of relays found in a terraform-inventory.host file.
+
+* block_history_plot.py
+ * Plot the output of test/heapwatch/{block_history.py,block_history_relays.py}
+
+* client_ram_report.py
+ * Process heap profiles (*.heap) collected from heapWatch.py
+ * Create a report on `algod` RAM usage
+
+* plot_crr_csv.py
+ * Plot the output of test/heapwatch/client_ram_report.py --csv
+
+* metrics_delta.py
+ * Process /metrics data captured by heapWatch.py
+ * Generate a text report on bandwidth in and out of relays/PN/NPN
+ * Optionally plot txn pool fullness
+
+* start.sh stop.sh
+ * Run a local private network of three nodes and two pingpongs.
+ * Periodically sample pprof memory profiles.
+ * Watch memory usage from `ps` and write to a CSV file for each algod.
+
+* bwstart.sh stop.sh
+ * Run a local private network of 3 relays and 8 leaf nodes
+ * Run 40 TPS of payment txns through it.
+ * Record metrics for bandwidth analysis.
+
+* runNodeHost.py nodeHostTarget.py
+ * Run a new EC2 host with NPN and PN algod on it, pointed at one relay (no DNS needed)
+
+
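The `.blockhistory` files these scripts exchange hold one base64-encoded msgpack blob per line. A minimal reader, sketched with the same `algosdk.encoding.msgpack` import block_history.py uses (the filename here is hypothetical):

    import base64
    from algosdk.encoding import msgpack

    def read_blockhistory(path):
        """Yield decoded block-header dicts from a block_history.py output file."""
        with open(path) as fin:
            for line in fin:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                yield msgpack.loads(base64.b64decode(line), strict_map_key=False)

    for b in read_blockhistory('relay1.blockhistory'):
        print(b['block'].get('rnd', 0), b['_time'])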
+## heapWatch.py local cluster usage
To start:
diff --git a/test/heapwatch/block_history.py b/test/heapwatch/block_history.py
new file mode 100644
index 000000000..29182e760
--- /dev/null
+++ b/test/heapwatch/block_history.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Capture block headers every round from a running `algod`
+#
+# pip install py-algorand-sdk
+
+import argparse
+import base64
+import logging
+import os
+import re
+import signal
+import sys
+import time
+
+import algosdk
+from algosdk.encoding import msgpack
+from algosdk.v2client.algod import AlgodClient
+
+logger = logging.getLogger(__name__)
+
+def addr_token_from_algod(algorand_data):
+ with open(os.path.join(algorand_data, 'algod.net')) as fin:
+ addr = fin.read().strip()
+ with open(os.path.join(algorand_data, 'algod.token')) as fin:
+ token = fin.read().strip()
+ if not addr.startswith('http'):
+ addr = 'http://' + addr
+ return addr, token
+
+def loads(blob):
+ return msgpack.loads(base64.b64decode(blob), strict_map_key=False)
+
+def dumps(blob):
+ return base64.b64encode(msgpack.dumps(blob))
+
+class Fetcher:
+ def __init__(self, algorand_data=None, token=None, addr=None, headers=None, prev_round=None, outpath=None):
+ """
+ algorand_data = path to algod data dir
+ addr, token = algod URI and access token
+ headers = dict of HTTP headers to send to algod
+ prev_round = start with (prev_round + 1)
+ outpath = path to append base64-msgpack-per-line data to
+ """
+ self.algorand_data = algorand_data
+ self.token = token
+ self.addr = addr
+ self.headers = headers
+ self._algod = None
+ self.go = True
+ self.prev_round = prev_round
+ self.block_time = None
+ self.outpath = outpath
+ self._outf = None
+ if outpath and ((prev_round is None) or (prev_round == -1)):
+ # load data, find last known round in data
+ try:
+ with open(outpath) as fin:
+ for line in fin:
+ if not line:
+ continue
+ line = line.strip()
+ if not line:
+ continue
+ if line[0] == '#':
+ continue
+ ob = loads(line)
+ rnd = ob['block'].get('rnd', 0)
+ if (self.prev_round is None) or (rnd > self.prev_round):
+ self.prev_round = rnd
+ except Exception:
+ pass # best effort; fall back to starting from scratch
+ return
+
+ def algod(self):
+ "return an open algosdk.v2client.algod.AlgodClient"
+ if self._algod is None:
+ if self.algorand_data:
+ addr, token = addr_token_from_algod(self.algorand_data)
+ logger.debug('algod from %r, (%s %s)', self.algorand_data, addr, token)
+ else:
+ token = self.token
+ addr = self.addr
+ logger.debug('algod from args (%s %s)', self.addr, self.token)
+ self._algod = AlgodClient(token, addr, headers=self.headers)
+ return self._algod
+
+ def outf(self):
+ if self._outf is None:
+ self._outf = open(self.outpath, 'ab')
+ return self._outf
+
+ def nextblock(self, lastround=None, retries=30):
+ trycount = 0
+ while (trycount < retries) and self.go:
+ trycount += 1
+ try:
+ return self._nextblock_inner(lastround)
+ except Exception as e:
+ if trycount >= retries:
+ logger.error('too many errors in nextblock retries')
+ raise
+ else:
+ logger.warning('error in nextblock(%r) (retrying): %s', lastround, e)
+ self._algod = None # retry with a new connection
+ time.sleep(1.2)
+ return None
+
+ def _nextblock_inner(self, lastround):
+ self.block_time = None
+ algod = self.algod()
+ if lastround is None:
+ status = algod.status()
+ lastround = status['last-round']
+ logger.debug('nextblock status last-round %s', lastround)
+ else:
+ try:
+ blk = self.algod().block_info(lastround + 1, response_format='msgpack')
+ if blk:
+ return blk
+ logger.warning('null block %d, lastround=%r', lastround+1, lastround)
+ except Exception as e:
+ pass
+ #logger.debug('could not get block %d: %s', lastround + 1, e, exc_info=True)
+ status = algod.status_after_block(lastround)
+ block_time = time.time() # the block has happened, don't count block data transit time
+ nbr = status['last-round']
+ retries = 30
+ while (nbr > lastround + 1) and self.go:
+ # if more than one block elapsed, we don't have a good time for either block
+ block_time = None
+ # try lastround+1 one last time
+ try:
+ blk = self.algod().block_info(lastround + 1, response_format='msgpack')
+ if blk:
+ return blk
+ logger.warning('null block %d, lastround=%r, status.last-round=%d', lastround+1, lastround, nbr)
+ time.sleep(1.1)
+ retries -= 1
+ if retries <= 0:
+ raise Exception("too many null block for %d", lastround+1)
+ except:
+ break
+ blk = self.algod().block_info(nbr, response_format='msgpack')
+ if blk:
+ self.block_time = block_time
+ return blk
+ raise Exception('got None for blk {}'.format(nbr))
+
+ def loop(self):
+ """Start processing blocks and txns
+ runs until error or self.go=False
+ """
+ try:
+ self._loop_inner(self.prev_round)
+ finally:
+ self.close()
+
+ def _loop_inner(self, lastround):
+ while self.go:
+ b = self.nextblock(lastround)
+ if b is None:
+ print("got None nextblock. exiting")
+ return
+ b = msgpack.loads(b, strict_map_key=False)
+ nowround = b['block'].get('rnd', 0)
+ if (lastround is not None) and (nowround != lastround + 1):
+ logger.info('round jump %d to %d', lastround, nowround)
+ self._block_handler(b)
+ lastround = nowround
+
+ def _block_handler(self, b):
+ # throw away txns; the cumulative count is kept in ['block']['tc']
+ b['block'].pop('txns', [])
+ # throw away certs
+ b.pop('cert', None)
+ # Add fine grained time. This should be better than ['block']['ts']
+ b['_time'] = self.block_time or time.time()
+ self.outf().write(dumps(b) + b'\n')
+
+ def close(self):
+ self._algod = None
+
+def header_list_to_dict(hlist):
+ if not hlist:
+ return None
+ p = re.compile(r':\s+')
+ out = {}
+ for x in hlist:
+ a, b = p.split(x, 1)
+ out[a] = b
+ return out
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('-d', '--algod', default=None, help='algod data dir')
+ ap.add_argument('-a', '--addr', default=None, help='algod host:port address')
+ ap.add_argument('-t', '--token', default=None, help='algod API access token')
+ ap.add_argument('--header', dest='headers', nargs='*', help='"Name: value" HTTP header (repeatable)')
+ ap.add_argument('--all', default=False, action='store_true', help='fetch all blocks from 0')
+ ap.add_argument('--verbose', default=False, action='store_true')
+ ap.add_argument('-o', '--out', default=None, help='file to append base64-msgpack lines to')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ algorand_data = args.algod or os.getenv('ALGORAND_DATA')
+ if not algorand_data and not (args.token and args.addr):
+ sys.stderr.write('must specify algod data dir by $ALGORAND_DATA or -d/--algod; OR -a/--addr and -t/--token\n')
+ sys.exit(1)
+
+ prev_round = None
+ if args.all:
+ prev_round = -1
+ bot = Fetcher(
+ algorand_data,
+ token=args.token,
+ addr=args.addr,
+ headers=header_list_to_dict(args.headers),
+ outpath=args.out,
+ prev_round=prev_round,
+ )
+
+ def do_graceful_stop(signum, frame):
+ if bot.go == False:
+ sys.stderr.write("second signal, quitting\n")
+ sys.exit(1)
+ sys.stderr.write("graceful stop...\n")
+ bot.go = False
+ signal.signal(signal.SIGTERM, do_graceful_stop)
+ signal.signal(signal.SIGINT, do_graceful_stop)
+
+ bot.loop()
+
+if __name__ == '__main__':
+ main()
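Fetcher._nextblock_inner above long-polls algod for the next round and timestamps the block as soon as it lands. The core of that loop, reduced to a sketch with retries and jump handling omitted (`client` is assumed to be an algosdk v2 AlgodClient):

    import time
    from algosdk.encoding import msgpack

    def follow_blocks(client, lastround=None):
        """Yield (block_dict, wall_time) pairs, one per committed round."""
        if lastround is None:
            lastround = client.status()['last-round']
        while True:
            client.status_after_block(lastround)   # long-poll until a new round lands
            t = time.time()                        # block exists; don't count transfer time
            blk = client.block_info(lastround + 1, response_format='msgpack')
            yield msgpack.loads(blk, strict_map_key=False), t
            lastround += 1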
diff --git a/test/heapwatch/block_history_plot.py b/test/heapwatch/block_history_plot.py
new file mode 100644
index 000000000..174c1dca1
--- /dev/null
+++ b/test/heapwatch/block_history_plot.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Plot the output of test/heapwatch/{block_history.py,block_history_relays.py}
+#
+# Histograms of round times, TPS, txn/block
+# Graph over time of TPS or 10-round-moving-average-TPS
+
+import base64
+import statistics
+
+from algosdk.encoding import msgpack
+from matplotlib import pyplot as plt
+
+def process(path, args):
+ prevtime = None
+ prevtc = 0
+ prevts = None
+ prevrnd = None
+ mintxn = 9999999
+ maxtxn = 0
+ mindt = 999999
+ maxdt = 0
+ mintps = 999999
+ maxtps = 0
+ tcv = []
+ tsv = []
+ tpsv = []
+ dtv = []
+ txnv = []
+ count = 0
+ with open(path, 'rb') as fin:
+ for line in fin:
+ line = line.strip()
+ row = msgpack.loads(base64.b64decode(line), strict_map_key=False)
+ count += 1
+ block = row['block']
+ rnd = block.get('rnd',0)
+ tc = block.get('tc', 0)
+ ts = block.get('ts', 0) # timestamp recorded at algod, 1s resolution int
+ _time = row['_time'] # timestamp recorded at client, 0.000001s resolution float
+ tcv.append(tc)
+ if prevtime is not None:
+ dt = _time - prevtime
+ if dt < 1:
+ dt = ts - prevts
+ tsv.append(ts)
+ else:
+ if _time < tsv[-1]:
+ tsv.append(ts)
+ else:
+ tsv.append(_time)
+ dtxn = tc - prevtc
+ tps = dtxn / dt
+ mintxn = min(dtxn,mintxn)
+ maxtxn = max(dtxn,maxtxn)
+ mindt = min(dt,mindt)
+ maxdt = max(dt,maxdt)
+ mintps = min(tps,mintps)
+ maxtps = max(tps,maxtps)
+ tpsv.append(tps)
+ dtv.append(dt)
+ txnv.append(dtxn)
+ else:
+ tsv.append(ts)
+ prevrnd = rnd
+ prevtc = tc
+ prevts = ts
+ prevtime = _time
+ print('{} blocks, block txns [{}-{}], block seconds [{}-{}], tps [{}-{}]'.format(
+ count,
+ mintxn,maxtxn,
+ mindt,maxdt,
+ mintps,maxtps,
+ ))
+
+ start = args.start
+ end = len(txnv)-1
+ if not args.all:
+ # find the real start of the test
+ start += 1
+ for i in range(len(txnv)):
+ if len(list(filter(lambda x: x > 100, txnv[i:i+5]))) == 5:
+ start = i + 5
+ break
+ txmean = statistics.mean(txnv[start:])
+ txstd = statistics.stdev(txnv[start:])
+ end = len(txnv)
+ for i in range(start,len(txnv)):
+ if len(list(filter(lambda x: x > txmean-(txstd*2), txnv[i:i+5]))) < 4:
+ print('core test ends at block index {}'.format(i))
+ end = i
+ break
+
+ print('core test rounds [{}:{}]'.format(start,end))
+ print('block txns [{}-{}], block seconds [{}-{}], tps [{}-{}]'.format(
+ min(txnv[start:end]), max(txnv[start:end]),
+ min(dtv[start:end]), max(dtv[start:end]),
+ min(tpsv[start:end]), max(tpsv[start:end]),
+ ))
+ print('long round times: {}'.format(' '.join(list(map(str,filter(lambda x: x >= 9,dtv[start:end]))))))
+ fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2)
+ ax1.set_title('round time (seconds)')
+ ax1.hist(list(filter(lambda x: x < 9,dtv[start:end])),bins=20)
+
+ ax2.set_title('TPS')
+ ax2.hist(tpsv[start:end],bins=20)
+
+ ax3.set_title('txn/block')
+ ax3.hist(txnv[start:end],bins=20)
+
+ # 10 round moving average TPS
+ tpsv10 = []
+ for i in range(10,len(tsv)):
+ ts0 = tsv[i-10]
+ tsa = tsv[i]
+ tc0 = tcv[i-10]
+ tca = tcv[i]
+ dt = tsa-ts0
+ dtxn = tca-tc0
+ tpsv10.append(dtxn/dt)
+ if args.tps1:
+ ax4.set_title('TPS')
+ ax4.plot(tpsv[start:end])
+ print('fullish block sizes: {}'.format(list(filter(lambda x: x > 100, txnv))))
+ else:
+ ax4.set_title('TPS(10 round window)')
+ ax4.plot(tpsv10)
+ fig.tight_layout()
+ plt.savefig(path + '_hist.svg', format='svg')
+ plt.savefig(path + '_hist.png', format='png')
+
+def main():
+ import argparse
+ ap = argparse.ArgumentParser()
+ ap.add_argument('files', nargs='+')
+ ap.add_argument('--all', default=False, action='store_true')
+ ap.add_argument('--tps1', default=False, action='store_true')
+ ap.add_argument('--start', default=0, type=int, help='start round')
+ args = ap.parse_args()
+
+ for fname in args.files:
+ process(fname, args)
+
+if __name__ == '__main__':
+ main()
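The 10-round moving-average TPS in the plot is just Δtc/Δts over a sliding window of the `tsv`/`tcv` lists built above. As a standalone sketch (the zero-dt guard is an addition for safety, not in the original):

    def moving_tps(tsv, tcv, window=10):
        """Return TPS averaged over the trailing `window` rounds, per index."""
        out = []
        for i in range(window, len(tsv)):
            dt = tsv[i] - tsv[i - window]
            dtxn = tcv[i] - tcv[i - window]
            out.append(dtxn / dt if dt else 0.0)
        return out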
diff --git a/test/heapwatch/block_history_relays.py b/test/heapwatch/block_history_relays.py
new file mode 100644
index 000000000..5d3c7b0c7
--- /dev/null
+++ b/test/heapwatch/block_history_relays.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Capture block headers every round from a running `algod`
+# Talk to a set of relays found in a terraform-inventory.host file.
+#
+# pip install py-algorand-sdk
+
+
+import argparse
+import atexit
+import configparser
+import logging
+import os
+import re
+import signal
+import sys
+import threading
+
+import block_history
+
+logger = logging.getLogger(__name__)
+
+graceful_stop = False
+fetchers = []
+
+def do_graceful_stop(signum, frame):
+ global fetchers
+ global graceful_stop
+ if graceful_stop:
+ sys.stderr.write("second signal, quitting\n")
+ sys.exit(1)
+ sys.stderr.write("graceful stop...\n")
+ graceful_stop = True
+ for fet in fetchers:
+ fet.go = False
+
+relay_pat = re.compile(r'name_r\d+')
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('--tf-inventory', default='terraform-inventory.host', help='terraform inventory file to read relay addresses from')
+ ap.add_argument('--all', default=False, action='store_true', help='capture from every relay, not just those whose name ends in 1')
+ ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
+ ap.add_argument('--pid')
+ ap.add_argument('--token', default='', help='default algod api token to use')
+ ap.add_argument('--outdir', required=True)
+ ap.add_argument('--all-rounds', default=False, action='store_true', help='fetch all blocks from 0')
+ ap.add_argument('--verbose', default=False, action='store_true')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ if args.pid:
+ with open(args.pid, 'w') as fout:
+ fout.write('{}'.format(os.getpid()))
+ atexit.register(os.remove, args.pid)
+ prev_round = None
+ if args.all_rounds:
+ prev_round = -1
+ signal.signal(signal.SIGTERM, do_graceful_stop)
+ signal.signal(signal.SIGINT, do_graceful_stop)
+
+ threads = []
+ cp = configparser.ConfigParser(allow_no_value=True)
+ cp.read(args.tf_inventory)
+ for k,v in cp.items():
+ if not relay_pat.match(k):
+ continue
+ if args.all:
+ pass
+ elif k.endswith('1'):
+ pass
+ else:
+ continue
+ for net in v.keys():
+ addr = 'http://' + net + ':' + args.port
+ outpath = os.path.join(args.outdir, k + '_' + net + '.blockhistory')
+ fet = block_history.Fetcher(addr=addr, token=args.token, outpath=outpath, prev_round=prev_round)
+ t = threading.Thread(target=fet.loop)
+ logger.debug('starting %s -> %s', addr, outpath)
+ t.start()
+ threads.append(t)
+ fetchers.append(fet)
+ for t in threads:
+ t.join()
+ logger.debug('block_history_relays.py done')
+
+if __name__ == '__main__':
+ main()
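The relay fan-out above is a plain thread-per-fetcher pattern: one block_history.Fetcher per relay, all stopped by clearing each fetcher's `go` flag. Reduced to a sketch (output filenames are hypothetical):

    import threading
    import block_history

    def capture_all(addrs, token):
        """Start one Fetcher thread per relay address in `addrs`."""
        fetchers = [block_history.Fetcher(addr=a, token=token,
                                          outpath='relay{}.blockhistory'.format(i))
                    for i, a in enumerate(addrs)]
        threads = [threading.Thread(target=f.loop) for f in fetchers]
        for t in threads:
            t.start()
        # to stop: set f.go = False on each fetcher, then join the threads
        return fetchers, threads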
diff --git a/test/heapwatch/client_ram_report.py b/test/heapwatch/client_ram_report.py
index 5ac0f2dd2..04f212f18 100644
--- a/test/heapwatch/client_ram_report.py
+++ b/test/heapwatch/client_ram_report.py
@@ -1,6 +1,27 @@
#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Process heap profiles (*.heap) collected from heapWatch.py
+# Create a report on `algod` RAM usage
import argparse
+import configparser
import csv
import glob
import json
@@ -83,6 +104,43 @@ def get_heap_inuse_totals(dirpath):
return cached
+def maybe_load_tf_nicks(args):
+ tf_inventory_path = os.path.join(args.dir, 'terraform-inventory.host')
+ if not os.path.exists(tf_inventory_path):
+ return None
+ tf_inventory = configparser.ConfigParser(allow_no_value=True)
+ tf_inventory.read(tf_inventory_path)
+ ip_to_name = {}
+ for k, sub in tf_inventory.items():
+ if k.startswith('name_'):
+ nick = k[5:]
+ for ip in sub:
+ if ip in ip_to_name:
+ logger.warning('ip %r already named %r, also got %r', ip, ip_to_name[ip], k)
+ ip_to_name[ip] = nick
+ return ip_to_name
+
+
+def hostports_to_nicks(args, hostports):
+ ip_to_nick = maybe_load_tf_nicks(args)
+ if not ip_to_nick:
+ return hostports
+ out = []
+ for hp in hostports:
+ hit = None
+ for ip, nick in ip_to_nick.items():
+ if ip in hp:
+ if hit is None:
+ hit = nick
+ else:
+ logger.warning('nick collision in ip=%r, hit=%r nick=%r', ip, hit, nick)
+ hit = nick
+ if not hit:
+ hit = hp
+ out.append(hit)
+ return out
+
+
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dir', required=True, help='dir path to find /*.metrics in')
@@ -109,7 +167,7 @@ def main():
whens.add(ts)
whens = sorted(whens)
nodes = sorted(heap_totals.keys())
- writer.writerow(['when','dt','round'] + nodes)
+ writer.writerow(['when','dt','round'] + hostports_to_nicks(args, nodes))
first = None
for ts in whens:
tv = time.mktime(time.strptime(ts, '%Y%m%d_%H%M%S'))
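maybe_load_tf_nicks above reads the terraform inventory with configparser, treating each `name_*` section as a nickname whose keys are IPs. That mapping, as a standalone sketch:

    import configparser

    def ip_to_nick(inventory_path):
        """Map each IP in a terraform-inventory.host file to its name_* nickname."""
        cp = configparser.ConfigParser(allow_no_value=True)
        cp.read(inventory_path)
        out = {}
        for section, hosts in cp.items():
            if section.startswith('name_'):
                for ip in hosts:
                    out[ip] = section[len('name_'):]
        return out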
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index 43e51618e..aced214f0 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -1,6 +1,24 @@
#!/usr/bin/python3
#
-# repeatedly snapshot heap profiles for one or more algod
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# repeatedly snapshot metrics & profiles for one or more algod
#
# usage:
# mkdir -p /tmp/heaps
@@ -12,12 +30,15 @@ import configparser
import fnmatch
import json
import logging
+import math
import os
+import queue
import re
import signal
import shutil
import subprocess
import sys
+import threading
import time
import urllib.request
@@ -79,9 +100,17 @@ def jsonable(ob):
return {jsonable(k):jsonable(v) for k,v in ob.items()}
return ob
+def nmax(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return max(a,b)
+
class algodDir:
def __init__(self, path, net=None, token=None, admin_token=None):
self.path = path
+ self.isdir = os.path.isdir(path)
self.nick = os.path.basename(self.path)
if net is None:
net, token, admin_token = read_algod_dir(self.path)
@@ -91,9 +120,12 @@ class algodDir:
self.headers = {}
self._pid = None
self._algod = None
+ self.timeout = 15
def pid(self):
if self._pid is None:
+ if not self.isdir:
+ return None
with open(os.path.join(self.path, 'algod.pid')) as fin:
self._pid = int(fin.read())
return self._pid
@@ -106,11 +138,17 @@ class algodDir:
self._algod = algosdk.v2client.algod.AlgodClient(self.token, net, self.headers)
return self._algod
- def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None):
+ def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None, timeout=None):
+ if timeout is None:
+ timeout = self.timeout
url = 'http://' + self.net + '/urlAuth/' + self.admin_token + '/debug/pprof/' + name
- response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers))
+ try:
+ response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers), timeout=timeout)
+ except Exception as e:
+ logger.error('could not fetch %s from %s via %r (%s)', name, self.path, url, e)
+ return
if response.code != 200:
- logger.error('could not fetch %s from %s via %r', name, self.path. url)
+ logger.error('could not fetch %s from %s via %r (%r)', name, self.path, url, response.code)
return
blob = response.read()
if snapshot_name is None:
@@ -127,10 +165,16 @@ class algodDir:
def get_goroutine_snapshot(self, snapshot_name=None, outdir=None):
return self.get_pprof_snapshot('goroutine', snapshot_name, outdir)
- def get_metrics(self, snapshot_name=None, outdir=None):
+ def get_cpu_profile(self, snapshot_name=None, outdir=None, seconds=90):
+ seconds = int(seconds)
+ return self.get_pprof_snapshot('profile?seconds={}'.format(seconds), snapshot_name, outdir, timeout=seconds+20)
+
+ def get_metrics(self, snapshot_name=None, outdir=None, timeout=None):
url = 'http://' + self.net + '/metrics'
+ if timeout is None:
+ timeout = self.timeout
try:
- response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers))
+ response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers), timeout=timeout)
if response.code != 200:
logger.error('could not fetch %s from %s via %r', snapshot_name, self.path, url)
return
@@ -143,6 +187,11 @@ class algodDir:
fout.write(blob)
logger.debug('%s -> %s', self.nick, outpath)
+ def go_metrics(self, snapshot_name=None, outdir=None):
+ t = threading.Thread(target=self.get_metrics, args=(snapshot_name, outdir))
+ t.start()
+ return t
+
def get_blockinfo(self, snapshot_name=None, outdir=None):
try:
algod = self.algod()
@@ -160,9 +209,20 @@ class algodDir:
with open(outpath, 'wt') as fout:
json.dump(jsonable(bi), fout)
return bi
- #txncount = bi['block']['tc']
+
+ def _get_blockinfo_q(self, snapshot_name=None, outdir=None, biqueue=None):
+ bi = self.get_blockinfo(snapshot_name, outdir)
+ if biqueue and bi:
+ biqueue.put(bi)
+
+ def go_blockinfo(self, snapshot_name=None, outdir=None, biqueue=None):
+ t = threading.Thread(target=self._get_blockinfo_q, args=(snapshot_name, outdir, biqueue))
+ t.start()
+ return t
def psHeap(self):
+ if not self.isdir:
+ return None, None
# return rss, vsz (in kilobytes)
# ps -o rss,vsz $(cat ${ALGORAND_DATA}/algod.pid)
subp = subprocess.Popen(['ps', '-o', 'rss,vsz', str(self.pid())], stdout=subprocess.PIPE)
@@ -177,12 +237,33 @@ class algodDir:
except:
return None, None
+class maxrnd:
+ def __init__(self, biqueue):
+ self.biqueue = biqueue
+ self.maxrnd = None
+
+ def _run(self):
+ while True:
+ bi = self.biqueue.get()
+ if 'block' not in bi:
+ return
+ rnd = bi['block'].get('rnd',0)
+ if (self.maxrnd is None) or (rnd > self.maxrnd):
+ self.maxrnd = rnd
+ def start(self):
+ t = threading.Thread(target=self._run)
+ t.start()
+ return t
+
class watcher:
def __init__(self, args):
self.args = args
self.prevsnapshots = {}
self.they = []
self.netseen = set()
+ self.latest_round = None
+ self.bi_hosts = []
+ self.netToAd = {}
os.makedirs(self.args.out, exist_ok=True)
if not args.data_dirs and os.path.exists(args.tf_inventory):
cp = configparser.ConfigParser(allow_no_value=True)
@@ -190,6 +271,8 @@ class watcher:
shutil.copy2(args.tf_inventory, self.args.out)
for role in args.tf_roles.split(','):
role_name = 'role_' + role
+ if role_name not in cp:
+ continue
for net in cp[role_name].keys():
logger.debug('addnet role %s %s', role, net)
self._addnet(net)
@@ -201,6 +284,19 @@ class watcher:
for net in v.keys():
logger.debug('addnet re %s %s', nre, net)
self._addnet(net)
+ if args.tf_bi_re:
+ namere = re.compile(args.tf_bi_re)
+ for k,v in cp.items():
+ if not namere.match(k):
+ continue
+ for net in v.keys():
+ logger.debug('bi net %s %s', args.tf_bi_re, net)
+ ad = self.netToAd.get(net)
+ if not ad:
+ self._addnet(net)
+ ad = self.netToAd.get(net)
+ if ad:
+ self.bi_hosts.append(ad)
for path in args.data_dirs:
if not os.path.isdir(path):
continue
@@ -222,20 +318,29 @@ class watcher:
try:
ad = algodDir(net, net=net, token=self.args.token, admin_token=self.args.admin_token)
self.they.append(ad)
+ self.netToAd[net] = ad
except:
logger.error('bad algod: %r', net, exc_info=True)
- def do_snap(self, now):
+ def do_snap(self, now, get_cpu=False, fraction=False):
snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime(now))
snapshot_isotime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now))
+ if fraction:
+ sf = now - math.floor(now)
+ sfs = '{:.6f}'.format(sf)
+ if sfs[0] == '0':
+ sfs = sfs[1:]
+ snapshot_name += sfs
+ snapshot_isotime += sfs
logger.debug('begin snapshot %s', snapshot_name)
psheaps = {}
newsnapshots = {}
if self.args.heaps:
for ad in self.they:
snappath = ad.get_heap_snapshot(snapshot_name, outdir=self.args.out)
- newsnapshots[ad.path] = snappath
+ if snappath:
+ newsnapshots[ad.path] = snappath
rss, vsz = ad.psHeap()
if rss and vsz:
psheaps[ad.nick] = (rss, vsz)
@@ -247,11 +352,35 @@ class watcher:
for ad in self.they:
ad.get_goroutine_snapshot(snapshot_name, outdir=self.args.out)
if self.args.metrics:
+ threads = []
for ad in self.they:
- ad.get_metrics(snapshot_name, outdir=self.args.out)
+ threads.append(ad.go_metrics(snapshot_name, outdir=self.args.out))
+ for t in threads:
+ t.join()
+ logger.debug('metrics done')
if self.args.blockinfo:
+ threads = []
+ biq = queue.SimpleQueue()
+ mr = maxrnd(biq)
+ mrt = mr.start()
+ bi_hosts = self.bi_hosts or self.they
+ for ad in bi_hosts:
+ threads.append(ad.go_blockinfo(snapshot_name, outdir=self.args.out, biqueue=biq))
+ for t in threads:
+ t.join()
+ biq.put({})
+ mrt.join()
+ self.latest_round = mr.maxrnd
+ logger.debug('blockinfo done')
+ if get_cpu:
+ cpuSample = durationToSeconds(self.args.cpu_sample) or 90
+ threads = []
for ad in self.they:
- ad.get_blockinfo(snapshot_name, outdir=self.args.out)
+ t = threading.Thread(target=ad.get_cpu_profile, kwargs={'snapshot_name':snapshot_name, 'outdir':self.args.out, 'seconds': cpuSample})
+ t.start()
+ threads.append(t)
+ for t in threads:
+ t.join()
if self.args.svg:
logger.debug('snapped, processing pprof...')
# make absolute and differential plots
@@ -263,6 +392,24 @@ class watcher:
subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse_diff.svg', '-base='+prev, snappath])
subprocess.call(['go', 'tool', 'pprof', '-sample_index=alloc_space', '-svg', '-output', snappath + '.alloc_diff.svg', '-diff_base='+prev, snappath])
self.prevsnapshots = newsnapshots
+ logger.debug('end snapshot %s', snapshot_name)
+
+def durationToSeconds(rts):
+ if rts is None:
+ return None
+ rts = rts.lower()
+ if rts.endswith('h'):
+ mult = 3600
+ rts = rts[:-1]
+ elif rts.endswith('m'):
+ mult = 60
+ rts = rts[:-1]
+ elif rts.endswith('s'):
+ mult = 1
+ rts = rts[:-1]
+ else:
+ mult = 1
+ return float(rts) * mult
def main():
ap = argparse.ArgumentParser()
@@ -273,14 +420,18 @@ def main():
ap.add_argument('--blockinfo', default=False, action='store_true', help='also capture block header info')
ap.add_argument('--period', default=None, help='seconds between automatically capturing')
ap.add_argument('--runtime', default=None, help='(\d+)[hm]? time in hour/minute (default second) to gather info then exit')
+ ap.add_argument('--rounds', default=None, type=int, help='number of rounds to run')
ap.add_argument('--tf-inventory', default='terraform-inventory.host', help='terraform inventory file to use if no data_dirs specified')
ap.add_argument('--token', default='', help='default algod api token to use')
ap.add_argument('--admin-token', default='', help='default algod admin-api token to use')
ap.add_argument('--tf-roles', default='relay', help='comma separated list of terraform roles to follow')
ap.add_argument('--tf-name-re', action='append', default=[], help='regexp to match terraform node names, may be repeated')
+ ap.add_argument('--tf-bi-re', help='regexp to match terraform node names to fetch block headers from')
ap.add_argument('--svg', dest='svg', default=False, action='store_true', help='automatically run `go tool pprof` to generate performance profile svg from collected data')
ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
ap.add_argument('-o', '--out', default=None, help='directory to write to')
+ ap.add_argument('--cpu-after', help='capture cpu profile after some time (e.g. 5m (after start))')
+ ap.add_argument('--cpu-sample', help='capture cpu profile for some time (e.g. 90s)')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -305,45 +456,49 @@ def main():
app.do_snap(now)
endtime = None
+ end_round = None
+ if (app.latest_round is not None) and (args.rounds is not None):
+ end_round = app.latest_round + args.rounds
if args.runtime:
- rts = args.runtime
- if rts.endswith('h'):
- mult = 3600
- rts = rts[:-1]
- elif rts.endswith('m'):
- mult = 60
- rts = rts[:-1]
- else:
- mult = 1
- endtime = (float(rts) * mult) + start
+ endtime = durationToSeconds(args.runtime) + start
+ logger.debug('now %.1f; endtime %.1f', start, endtime)
+
+ cpuAfter = durationToSeconds(args.cpu_after)
+ if cpuAfter is not None:
+ cpuAfter += start
+
if args.period:
- lastc = args.period.lower()[-1:]
- if lastc == 's':
- periodSecs = int(args.period[:-1])
- elif lastc == 'm':
- periodSecs = int(args.period[:-1]) * 60
- elif lastc == 'h':
- periodSecs = int(args.period[:-1]) * 3600
- else:
- periodSecs = int(args.period)
+ periodSecs = durationToSeconds(args.period)
+ snap_fraction = periodSecs < 1.0
periodi = 1
nextt = start + (periodi * periodSecs)
while not graceful_stop:
+ logger.debug('nextt %f now %f', nextt, now)
while nextt < now:
nextt = start + (periodi * periodSecs)
+ periodi += 1
while now < nextt - (periodSecs * 0.05):
logger.debug('sleep %f', nextt - now)
time.sleep(nextt - now)
if graceful_stop:
- return
+ return 0
now = time.time()
periodi += 1
nextt += periodSecs
- app.do_snap(now)
+ get_cpu = False
+ if (cpuAfter is not None) and (now > cpuAfter):
+ get_cpu = True
+ cpuAfter = None
+ app.do_snap(now, get_cpu, fraction=snap_fraction)
+ now = time.time()
if (endtime is not None) and (now > endtime):
- return
+ logger.debug('after endtime, done')
+ return 0
+ if (end_round is not None) and (app.latest_round is not None) and (app.latest_round >= end_round):
+ logger.debug('after end round %d > %d', app.latest_round, end_round)
+ return 0
return 0
if __name__ == '__main__':
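The snapshot loop above schedules against absolute deadlines (start + i*period) rather than sleeping a fixed interval, so per-snapshot cost does not accumulate drift. The skeleton of that pattern, as a sketch separate from heapWatch.py's own loop:

    import time

    def run_periodic(period_secs, snap, runtime=None):
        """Call snap(now) every period_secs, aligned to absolute deadlines."""
        start = time.time()
        periodi = 1
        while True:
            nextt = start + periodi * period_secs
            now = time.time()
            if now < nextt:
                time.sleep(nextt - now)
            snap(time.time())
            periodi += 1
            if runtime is not None and time.time() - start > runtime:
                return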
diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py
index b6aa2ae8a..70324c3c7 100644
--- a/test/heapwatch/metrics_delta.py
+++ b/test/heapwatch/metrics_delta.py
@@ -1,4 +1,25 @@
#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Process /metrics data captured by heapWatch.py
+#
+# Generate text report on bandwidth in and out of relays/PN/NPN
import argparse
import configparser
@@ -143,18 +164,32 @@ class summary:
self.tpsMeanSum = 0
self.txBpsMeanSum = 0
self.rxBpsMeanSum = 0
+ self.tpsSum = 0
+ self.blockTimeSum = 0
self.sumsCount = 0
self.nodes = {}
+ self.biByTime = {}
+ self.verifyMillis = []
def __call__(self, ttr, nick):
if not ttr:
+ logger.debug('no summary for %s', nick)
return
self.nodes[nick] = ttr
logger.debug('%d points from %s', len(ttr.tpsList), nick)
self.tpsMeanSum += meanOrZero(ttr.tpsList)
self.txBpsMeanSum += meanOrZero(ttr.txBpsList)
self.rxBpsMeanSum += meanOrZero(ttr.rxBpsList)
+ self.tpsSum += ttr.tps
+ self.blockTimeSum += ttr.blockTime
self.sumsCount += 1
+ if ttr.biByTime:
+ self.biByTime.update(ttr.biByTime)
+ if ttr.verifyMillis:
+ self.verifyMillis.append(ttr.verifyMillis)
+
+ def blockinfo(self, curtime):
+ return self.biByTime.get(curtime)
def byMsg(self):
txPSums = {}
@@ -209,14 +244,42 @@ class summary:
def __str__(self):
if not self.sumsCount:
tps, txbps, rxbps = math.nan, math.nan, math.nan
+ blockTimes = math.nan
else:
- tps = self.tpsMeanSum/self.sumsCount
+ #tps = self.tpsMeanSum/self.sumsCount
+ tps = self.tpsSum/self.sumsCount
+ blockTimes = self.blockTimeSum/self.sumsCount
txbps = self.txBpsMeanSum/self.sumsCount
rxbps = self.rxBpsMeanSum/self.sumsCount
labelspace = ""
if self.label:
labelspace = self.label + " "
- return '{byMsg}\n{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps))
+ if self.verifyMillis:
+ verifyMillis = labelspace + 'verify ms ({:.0f}/{:.0f}/{:.0f})\n'.format(min(self.verifyMillis), meanOrZero(self.verifyMillis), max(self.verifyMillis))
+ else:
+ verifyMillis = ''
+ return '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis)
+
+ def plot_pool(self, outpath):
+ from matplotlib import pyplot as plt
+ plotted_any = False
+ for nick, ns in self.nodes.items():
+ if not ns.txPool:
+ continue
+ plotted_any = True
+ plt.plot(ns.times, ns.txPool, label=nick)
+ csvoutpath = outpath + nick + '.csv'
+ with open(csvoutpath, 'w') as fout:
+ writer = csv.writer(fout)
+ writer.writerow(['time', 'pool'])
+ for t, p in zip(ns.times, ns.txPool):
+ writer.writerow([t,p])
+ if not plotted_any:
+ logger.error('no txPool in {}'.format(list(self.nodes.keys())))
+ return
+ plt.legend(loc='upper right')
+ plt.savefig(outpath + '.svg', format='svg')
+ plt.savefig(outpath + '.png', format='png')
def anynickre(nick_re, nicks):
if not nick_re:
@@ -230,7 +293,7 @@ def anynickre(nick_re, nicks):
def gather_metrics_files_by_nick(metrics_files, metrics_dirs=None):
'''return {"node nickname":[path, path, ...], ...}'''
- metrics_fname_re = re.compile(r'(.*)\.(.*).metrics')
+ metrics_fname_re = re.compile(r'(.*?)\.([0-9_]+\.?\d+)\.metrics')
filesByNick = {}
nonick = []
tf_inventory_path = None
@@ -250,14 +313,16 @@ def gather_metrics_files_by_nick(metrics_files, metrics_dirs=None):
dapp(filesByNick, nick, path)
return tf_inventory_path, filesByNick, nonick
-def process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args):
+def process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum):
nretup = (nre,)
for rnick, paths in filesByNick.items():
nick = nick_to_tfname.get(rnick, rnick)
if anynickre(nretup, (rnick,nick)):
- rsum(process_files(args, nick, paths), nick)
+ rsum(process_files(args, nick, paths, grsum), nick)
def main():
+ os.environ['TZ'] = 'UTC'
+ time.tzset()
test_metric_line_re()
ap = argparse.ArgumentParser()
ap.add_argument('metrics_files', nargs='*')
@@ -267,6 +332,7 @@ def main():
ap.add_argument('--report', default=None, help='path to write csv report')
ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated')
ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated')
+ ap.add_argument('--pool-plot-root', help='write to foo.svg and .png')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -281,6 +347,7 @@ def main():
metrics_dirs.add(args.dir)
metrics_files += glob.glob(os.path.join(args.dir, '*.metrics'))
tf_inventory_path, filesByNick, nonick = gather_metrics_files_by_nick(metrics_files, metrics_dirs)
+ logger.debug('%d files gathered into %d nicks', len(metrics_files), len(filesByNick))
if not tf_inventory_path:
for md in metrics_dirs:
tp = os.path.join(md, 'terraform-inventory.host')
@@ -300,6 +367,7 @@ def main():
ip_to_name[ip] = k
#logger.debug('names: %r', sorted(ip_to_name.values()))
#logger.debug('ip to name %r', ip_to_name)
+ unfound = []
for ip, name in ip_to_name.items():
found = []
for nick in filesByNick.keys():
@@ -310,14 +378,30 @@ def main():
elif len(found) > 1:
logger.warning('ip %s (%s) found in nicks: %r', ip, name, found)
else:
+ unfound.append((ip,name))
+ if not nick_to_tfname:
+ for ip,name in unfound:
logger.warning('ip %s (%s) no nick', ip, name)
#logger.debug('nick_to_tfname %r', nick_to_tfname)
+ logger.debug('nicks: %s', ' '.join(map(lambda x: nick_to_tfname.get(x,x), filesByNick.keys())))
+
+ # global stats across all nodes
+ grsum = summary()
+ if nonick:
+ grsum(process_files(args, None, nonick), 'no nick')
+ for rnick, paths in filesByNick.items():
+ nick = nick_to_tfname.get(rnick, rnick)
+ logger.debug('%s: %d files', nick, len(paths))
+ grsum(process_files(args, nick, paths), nick)
+ if args.pool_plot_root:
+ grsum.plot_pool(args.pool_plot_root)
+ # maybe subprocess for stats across named groups
if args.nick_re:
# use each --nick-re=foo as a group
for nre in args.nick_re:
rsum = summary()
- process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args)
+ process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
return 0
@@ -325,20 +409,13 @@ def main():
for lnre in args.nick_lre:
label, nre = lnre.split(':', maxsplit=1)
rsum = summary(label)
- process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args)
+ process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
return 0
-
- # no filters, glob it all up
- rsum = summary()
- if nonick:
- rsum(process_files(args, None, nonick), 'no nick')
- for rnick, paths in filesByNick.items():
- nick = nick_to_tfname.get(rnick, rnick)
- rsum(process_files(args, nick, paths), nick)
- print(rsum)
+ # no filters, print global result
+ print(grsum)
return 0
def perProtocol(prefix, lists, sums, deltas, dt):
@@ -349,9 +426,22 @@ def perProtocol(prefix, lists, sums, deltas, dt):
dapp(lists, sub, v/dt)
sums[sub] = sums.get(sub,0) + v
-def process_files(args, nick, paths):
+def process_files(args, nick, paths, grsum=None):
"returns a nodestats object"
- return nodestats().process_files(args, nick, paths)
+ return nodestats().process_files(args, nick, paths, grsum and grsum.biByTime)
+
+path_time_re = re.compile(r'(\d\d\d\d)(\d\d)(\d\d)_(\d\d)(\d\d)(\d\d\.+\d+)')
+
+def parse_path_time(path):
+ m = path_time_re.search(path)
+ if not m:
+ return None
+ ts = float(m.group(6))
+ si = math.floor(ts)
+ t = time.mktime((int(m.group(1)), int(m.group(2)), int(m.group(3)),
+ int(m.group(4)), int(m.group(5)), si, 0, 0, 0))
+ t += ts - si
+ return t
class nodestats:
def __init__(self):
@@ -371,14 +461,24 @@ class nodestats:
# algod_network_sent_bytes_*
self.txPLists = {}
self.txPSums = {}
+ self.times = []
# algod_tx_pool_count{}
self.txPool = []
-
- def process_files(self, args, nick=None, metrics_files=None):
+ # total across all measurements
+ self.tps = 0
+ self.blockTime = 0
+ self.biByTime = {}
+ # average milliseconds per agreement block verify
+ self.verifyMillis = None
+
+ def process_files(self, args, nick=None, metrics_files=None, bisource=None):
"returns self, a nodestats object"
+ if bisource is None:
+ bisource = {}
self.args = args
self.nick = nick
if metrics_files is None:
+ logger.debug('nodestats(%s) no metrics files', nick)
return self
reportf = None
writer = None
@@ -398,18 +498,30 @@ class nodestats:
prevtime = None
prevPath = None
prevbi = None
+ firstTime = None
+ firstBi = None
for path in sorted(metrics_files):
+ curtime = parse_path_time(path) or os.path.getmtime(path)
+ self.times.append(curtime)
with open(path, 'rt', encoding="utf-8") as fin:
cur = parse_metrics(fin)
+ # TODO: use _any_ node's blockinfo json
bijsonpath = path.replace('.metrics', '.blockinfo.json')
bi = None
if os.path.exists(bijsonpath):
with open(bijsonpath, 'rt', encoding="utf-8") as fin:
bi = json.load(fin)
- curtime = os.path.getmtime(path)
+ self.biByTime[curtime] = bi
+ if bi is None:
+ bi = bisource.get(curtime)
self.txPool.append(cur.get('algod_tx_pool_count{}'))
#logger.debug('%s: %r', path, cur)
+ verifyGood = cur.get('algod_agreement_proposal_verify_good{}')
+ verifyMs = cur.get('algod_agreement_proposal_verify_ms{}')
+ if verifyGood and verifyMs:
+ # last writer wins
+ self.verifyMillis = verifyMs / verifyGood
if prev is not None:
d = metrics_delta(prev, cur)
dt = curtime - prevtime
@@ -451,10 +563,20 @@ class nodestats:
tps,
blocktime,
))
+ else:
+ firstTime = curtime
+ firstBi = bi
prev = cur
prevPath = path
prevtime = curtime
prevbi = bi
+ if prevbi is None or firstBi is None:
+ return self
+ txnCount = prevbi.get('block',{}).get('tc',0) - firstBi.get('block',{}).get('tc',0)
+ rounds = prevbi.get('block',{}).get('rnd',0) - firstBi.get('block',{}).get('rnd',0)
+ totalDt = prevtime - firstTime
+ self.tps = txnCount / totalDt
+ self.blockTime = totalDt / rounds
if writer and self.txBpsList:
writer.writerow([])
for bsum, msg in sorted([(bsum,msg) for msg,bsum in self.txPSums.items()]):
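The whole-run `tps` and `blockTime` numbers computed at the end of process_files reduce to two deltas: txn counter over wall time, and wall time over rounds. As a sketch on two blockinfo dicts plus their capture times (assumes at least one round elapsed between snapshots):

    def run_rates(first_bi, first_t, last_bi, last_t):
        """Return (tps, seconds_per_block) between two block-header snapshots."""
        dtxn = last_bi.get('block', {}).get('tc', 0) - first_bi.get('block', {}).get('tc', 0)
        rounds = last_bi.get('block', {}).get('rnd', 0) - first_bi.get('block', {}).get('rnd', 0)
        dt = last_t - first_t
        return dtxn / dt, dt / rounds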
diff --git a/test/heapwatch/nodeHostTarget.py b/test/heapwatch/nodeHostTarget.py
index 5332a1aea..2a29f4f8f 100644
--- a/test/heapwatch/nodeHostTarget.py
+++ b/test/heapwatch/nodeHostTarget.py
@@ -1,4 +1,21 @@
#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
#
# this is the script that runs on a node host started by runNodeHost.py
#
diff --git a/test/heapwatch/plot_crr_csv.py b/test/heapwatch/plot_crr_csv.py
new file mode 100755
index 000000000..14f23b857
--- /dev/null
+++ b/test/heapwatch/plot_crr_csv.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+#
+# Plot the output of test/heapwatch/client_ram_report.py --csv
+
+import csv
+import random
+
+from matplotlib import pyplot as plt
+
+_meta_cols = {'when', 'dt', 'round'}
+
+def smin(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return min(a,b)
+def smax(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return max(a,b)
+
+def main():
+ import argparse
+ ap = argparse.ArgumentParser()
+ ap.add_argument('files', nargs='+')
+ args = ap.parse_args()
+
+ for fname in args.files:
+ fvals = {}
+ minv = None
+ maxv = None
+ with open(fname) as fin:
+ reader = csv.DictReader(fin)
+ for rec in reader:
+ xround = int(rec['round'])
+ for k,v in rec.items():
+ if k in _meta_cols:
+ continue
+ klist = fvals.get(k)
+ if klist is None:
+ klist = []
+ fvals[k] = klist
+ v = float(v)
+ klist.append((xround, v))
+ minv = smin(minv, v)
+ maxv = smax(maxv, v)
+ print("{} found series {}".format(fname, sorted(fvals.keys())))
+ fig, ax = plt.subplots()
+ ax.set_ylabel('bytes')
+ ax.set_xlabel('round')
+ ax.set_ylim(minv,maxv)
+ for k in sorted(fvals.keys()):
+ xy = fvals[k]
+ #for k, xy in fvals.items():
+ lc = None
+ if k.startswith('r'):
+ # blueish
+ lc = (0.3*random.random(), 0.3*random.random(), 0.7+(0.3*random.random()))
+ elif k.startswith('npn'):
+ # greenish
+ lc = (0.3*random.random(), 0.7+(0.3*random.random()), 0.3*random.random())
+ elif k.startswith('n'):
+ # reddish
+ lc = (0.7+(0.3*random.random()), 0.3*random.random(), 0.3*random.random())
+ ax.plot([p[0] for p in xy], [p[1] for p in xy], label=k, color=lc)
+ ax.legend(loc='upper left', ncol=2)
+ plt.savefig(fname + '.svg', format='svg')
+ plt.savefig(fname + '.png', format='png')
+ #plt.show()
+
+if __name__ == '__main__':
+ main()
diff --git a/test/heapwatch/runNodeHost.py b/test/heapwatch/runNodeHost.py
index 10657907b..1d14881fd 100644
--- a/test/heapwatch/runNodeHost.py
+++ b/test/heapwatch/runNodeHost.py
@@ -1,4 +1,21 @@
#!/usr/bin/python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
#
# launch an ec2 instance in the same AZ with the same AMI, run some algod on it
#
diff --git a/test/netperf-go/puppeteer/promMetricFetcher.go b/test/netperf-go/puppeteer/promMetricFetcher.go
index 64a3341a0..06e0853c8 100644
--- a/test/netperf-go/puppeteer/promMetricFetcher.go
+++ b/test/netperf-go/puppeteer/promMetricFetcher.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"reflect"
"strconv"
@@ -62,7 +62,7 @@ func (r *promMetricFetcher) getMetric(query string) (results []promValueResult,
return nil, fmt.Errorf("http error code received %v", resp.StatusCode)
}
- bytes, err := ioutil.ReadAll(resp.Body)
+ bytes, err := io.ReadAll(resp.Body)
if err != nil {
return
}
diff --git a/test/netperf-go/puppeteer/puppeteer.go b/test/netperf-go/puppeteer/puppeteer.go
index bcc870672..5a5fab11c 100644
--- a/test/netperf-go/puppeteer/puppeteer.go
+++ b/test/netperf-go/puppeteer/puppeteer.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -73,7 +72,7 @@ type puppet struct {
}
func puppeteer(channel, jsonFile string) error {
- jsonBytes, err := ioutil.ReadFile(jsonFile)
+ jsonBytes, err := os.ReadFile(jsonFile)
if err != nil {
return err
}
@@ -367,7 +366,7 @@ func (p *puppet) runStep(recipeStep recipeStep, timeout time.Duration) error {
outFile: os.Stdout,
}
} else {
- output = ioutil.Discard
+ output = io.Discard
}
cmd.Stderr = &errorOutput
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index ea829b2e8..b7d04ff4a 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -125,7 +125,7 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
# Pin a version of our python SDK's so that breaking changes don't spuriously break our tests.
# Please update as necessary.
- "${TEMPDIR}/ve/bin/pip3" install py-algorand-sdk==1.9.0b1
+ "${TEMPDIR}/ve/bin/pip3" install py-algorand-sdk==1.17.0
# Enable remote debugging:
"${TEMPDIR}/ve/bin/pip3" install --upgrade debugpy
diff --git a/test/scripts/e2e_basic_start_stop.sh b/test/scripts/e2e_basic_start_stop.sh
index 8d9da35c9..1bb9b0bf1 100755
--- a/test/scripts/e2e_basic_start_stop.sh
+++ b/test/scripts/e2e_basic_start_stop.sh
@@ -28,6 +28,8 @@ function verify_at_least_one_running() {
}
function verify_none_running() {
+ local datadir=$1
+
# Shutting down can take some time, so wait at least 5 seconds
for TRIES in 1 2 3 4 5; do
update_running_count
@@ -37,6 +39,15 @@ function verify_none_running() {
sleep 1.4
done
echo "algod not expected to be running but it is"
+ if [ -n "$datadir" ]; then
+ echo "last 20 lines of node.log:"
+ tail -20 "$datadir/node.log"
+ echo "================================"
+ echo "stdout and stdin:"
+ cat "$datadir/algod-out.log"
+ echo "================================"
+ cat "$datadir/algod-err.log"
+ fi
exit 1
}
@@ -64,7 +75,7 @@ verify_at_least_one_running
echo Verifying we can stop it using goal
goal node stop -d ${DATADIR}
-verify_none_running
+verify_none_running ${DATADIR}
#----------------------
# Test that we can start a generic node straight with no overrides
@@ -72,7 +83,7 @@ echo Verifying a generic node will start directly
algod -d ${DATADIR} &
verify_at_least_one_running
pkill -u $(whoami) -x algod || true
-verify_none_running
+verify_none_running ${DATADIR}
#----------------------
# Test that we can start a generic node against the datadir
@@ -85,7 +96,7 @@ verify_at_least_one_running # one should still be running
verify_one_running # in fact, exactly one should still be running
# clean up
pkill -u $(whoami) -x algod || true
-verify_none_running
+verify_none_running ${DATADIR}
echo "----------------------------------------------------------------------"
echo " DONE: e2e_basic_start_stop"
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index 6fa5b4ffb..53a0e484e 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -246,10 +246,10 @@ class RunSet:
return self.pubw, self.maxpubaddr
def start(self, scriptname, timeout):
- self.event_log("run", scriptname)
t = threading.Thread(target=script_thread, args=(self, scriptname, timeout))
t.start()
with self.lock:
+ self.event_log("run", scriptname)
self.threads[scriptname] = t
def running(self, scriptname, p):
@@ -257,8 +257,8 @@ class RunSet:
self.procs[scriptname] = p
def done(self, scriptname, ok, seconds):
- self.event_log("pass" if ok else "fail", scriptname, seconds)
with self.lock:
+ self.event_log("pass" if ok else "fail", scriptname, seconds)
self.statuses.append( {'script':scriptname, 'ok':ok, 'seconds':seconds} )
if not ok:
self.errors.append('{} failed'.format(scriptname))
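The change above moves event_log inside the lock so that log order matches the order of mutations to shared state. The general pattern, as a sketch (RunLog is a hypothetical reduction of RunSet):

    import threading

    class RunLog:
        def __init__(self):
            self.lock = threading.Lock()
            self.statuses = []

        def done(self, name, ok):
            with self.lock:
                # log and mutate under the same lock, so the event log and
                # self.statuses are observed in the same order
                print('pass' if ok else 'fail', name)
                self.statuses.append({'script': name, 'ok': ok})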
diff --git a/test/scripts/e2e_subs/goal-app-info.sh b/test/scripts/e2e_subs/goal-app-info.sh
new file mode 100755
index 000000000..ef8cee7ac
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-app-info.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+scriptname="goal-app-info-test"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+EXTRA_PAGES=1
+GLOBAL_BYTESLICES=2
+GLOBAL_INTS=3
+LOCAL_BYTESLICES=4
+LOCAL_INTS=5
+
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/upgraded.teal --clear-prog ${DIR}/tealprogs/clear_program_state.teal --extra-pages ${EXTRA_PAGES} --global-byteslices ${GLOBAL_BYTESLICES} --global-ints ${GLOBAL_INTS} --local-byteslices ${LOCAL_BYTESLICES} --local-ints ${LOCAL_INTS} | grep Created | awk '{ print $6 }')
+
+APP_INFO=$(${gcmd} app info --app-id $APPID)
+
+ACTUAL_APPID=($(echo "$APP_INFO" | grep "Application ID:"))
+ACTUAL_APP_ACCOUNT=($(echo "$APP_INFO" | grep "Application account:"))
+ACTUAL_CREATOR=($(echo "$APP_INFO" | grep "Creator:"))
+ACTUAL_APPROVAL_HASH=($(echo "$APP_INFO" | grep "Approval hash:"))
+ACTUAL_CLEAR_HASH=($(echo "$APP_INFO" | grep "Clear hash:"))
+ACTUAL_EXTRA_PAGES=($(echo "$APP_INFO" | grep "Extra program pages:"))
+ACTUAL_GLOBAL_BYTESLICES=($(echo "$APP_INFO" | grep "Max global byteslices:"))
+ACTUAL_GLOBAL_INTS=($(echo "$APP_INFO" | grep "Max global integers:"))
+ACTUAL_LOCAL_BYTESLICES=($(echo "$APP_INFO" | grep "Max local byteslices:"))
+ACTUAL_LOCAL_INTS=($(echo "$APP_INFO" | grep "Max local integers:"))
+
+if [[ ${APPID} -ne ${ACTUAL_APPID[2]} ]]; then
+ date "+${scriptname} FAIL returned app ID does not match ${APPID} != ${ACTUAL_APPID[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+# Use the Python SDK to get the expected app escrow address
+EXPECTED_APP_ACCOUNT=$(python3 -c "from algosdk.logic import get_application_address;print(get_application_address($APPID))")
+if [[ $EXPECTED_APP_ACCOUNT != ${ACTUAL_APP_ACCOUNT[2]} ]]; then
+ date "+${scriptname} FAIL returned app account does not match ${EXPECTED_APP_ACCOUNT} != ${ACTUAL_APP_ACCOUNT[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${ACCOUNT} != ${ACTUAL_CREATOR[1]} ]]; then
+ date "+${scriptname} FAIL returned app creator does not match ${ACCOUNT} != ${ACTUAL_CREATOR[1]} %Y%m%d_%H%M%S"
+ false
+fi
+
+EXPECTED_APPROVAL_HASH="RBHEXJWG2M4T4OBDMNOQFKYYDPDMXQXZIMFZCINJAYVI5KPZLXVUWZRR2Q"
+if [[ ${EXPECTED_APPROVAL_HASH} != ${ACTUAL_APPROVAL_HASH[2]} ]]; then
+ date "+${scriptname} FAIL returned app approval hash does not match ${EXPECTED_APPROVAL_HASH} != ${ACTUAL_APPROVAL_HASH[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+EXPECTED_CLEAR_HASH="YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA"
+if [[ ${EXPECTED_CLEAR_HASH} != ${ACTUAL_CLEAR_HASH[2]} ]]; then
+ date "+${scriptname} FAIL returned app clear hash does not match ${EXPECTED_CLEAR_HASH} != ${ACTUAL_CLEAR_HASH[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${EXTRA_PAGES} -ne ${ACTUAL_EXTRA_PAGES[3]} ]]; then
+ date "+${scriptname} FAIL returned app extra pages does not match ${EXTRA_PAGES} != ${ACTUAL_EXTRA_PAGES[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${GLOBAL_BYTESLICES} -ne ${ACTUAL_GLOBAL_BYTESLICES[3]} ]]; then
+ date "+${scriptname} FAIL returned app global byte slice schema does not match ${GLOBAL_BYTESLICES} != ${ACTUAL_GLOBAL_BYTESLICES[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${GLOBAL_INTS} -ne ${ACTUAL_GLOBAL_INTS[3]} ]]; then
+ date "+${scriptname} FAIL returned app global int schema does not match ${GLOBAL_INTS} != ${ACTUAL_GLOBAL_INTS[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${LOCAL_BYTESLICES} -ne ${ACTUAL_LOCAL_BYTESLICES[3]} ]]; then
+ date "+${scriptname} FAIL returned app local byte slice schema does not match ${LOCAL_BYTESLICES} != ${ACTUAL_LOCAL_BYTESLICES[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${LOCAL_INTS} -ne ${ACTUAL_LOCAL_INTS[3]} ]]; then
+ date "+${scriptname} FAIL returned app local int schema does not match ${LOCAL_INTS} != ${ACTUAL_LOCAL_INTS[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/tps.py b/test/scripts/tps.py
index 834cdbb7a..103f83a7e 100644
--- a/test/scripts/tps.py
+++ b/test/scripts/tps.py
@@ -37,14 +37,13 @@ def algod_client_for_dir(algorand_data, headers=None):
def get_blockinfo_tps(algod, rounds=10):
status = algod.status()
- rounds = 10
ba = msgpack.loads(algod.block_info(status['last-round']-rounds, response_format='msgpack'), strict_map_key=False)
bb = msgpack.loads(algod.block_info(status['last-round'], response_format='msgpack'), strict_map_key=False)
ra = ba['block']['rnd']
rb = bb['block']['rnd']
assert(rb - ra == rounds)
- tca = ba['block']['tc']
- tcb = bb['block']['tc']
+ tca = ba['block'].get('tc',0)
+ tcb = bb['block'].get('tc',0)
tsa = ba['block']['ts']
tsb = bb['block']['ts']
dt = tsb-tsa
@@ -54,11 +53,57 @@ def get_blockinfo_tps(algod, rounds=10):
logger.debug('(b[%d].TxnCounter %d) - (b[%d].TxnCounter %d) = %d txns', ra, tca, rb, tcb, dtxn)
return tps
+def mins(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return min(a,b)
+
+def maxs(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return max(a,b)
+
+def get_blockinfo_tps_with_types(algod, rounds=10, adir=''):
+ status = algod.status()
+ lastround = status['last-round']
+ cround = lastround - rounds
+ bytxtype = {}
+ mintime = None
+ maxtime = None
+ mintc = None
+ maxtc = 0
+ while cround <= lastround:
+ ba = msgpack.loads(algod.block_info(cround, response_format='msgpack'), strict_map_key=False)
+ #logger.debug('block keys %s', sorted(ba['block'].keys()))
+ mintime = mins(mintime, ba['block']['ts'])
+ maxtime = maxs(maxtime, ba['block']['ts'])
+ mintc = mins(mintc, ba['block'].get('tc'))
+ maxtc = maxs(maxtc, ba['block'].get('tc',0))
+ txns = ba['block'].get('txns',[])
+ for stxib in txns:
+ #logger.debug('txn keys %s', sorted(stxib['txn'].keys()))
+ tt = stxib['txn']['type']
+ bytxtype[tt] = bytxtype.get(tt, 0) + 1
+ cround += 1
+ summary = [(count, tt) for tt,count in bytxtype.items()]
+ summary.sort(reverse=True)
+ print(summary)
+ dt = maxtime-mintime
+ dtxn = maxtc-mintc
+ logger.debug('%s ts=[%d..%d] (%ds), tc=[%d..%d] (%d txn)', adir, mintime, maxtime, dt, mintc, maxtc, dtxn)
+ tps = dtxn/dt
+ return tps
+
def main():
ap = argparse.ArgumentParser()
ap.add_argument('data_dirs', nargs='*', help='list paths to algorand datadirs to grab heap profile from')
ap.add_argument('-d', dest='algorand_data')
- ap.add_argument('-r', '--rounds', type=int, help='number of rounds to calculate over')
+ ap.add_argument('-T', '--types', default=False, action='store_true', help='show txn types counts within round range')
+ ap.add_argument('-r', '--rounds', type=int, default=10, help='number of rounds to calculate over')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -70,11 +115,21 @@ def main():
datadirs = args.data_dirs
if args.algorand_data:
datadirs = datadirs + [args.algorand_data]
+ if not datadirs:
+ ad = os.getenv('ALGORAND_DATA')
+ if ad:
+ datadirs.append(ad)
+ if not datadirs:
+ sys.stderr.write('no data dirs specified (positional file, -d AD, $ALGORAND_DATA)\n')
+ sys.exit(1)
for adir in datadirs:
algod = algod_client_for_dir(adir)
- tps = get_blockinfo_tps(algod, rounds=args.rounds)
- print('{:5.1f}\t{}'.format(tps, adir))
+ if args.types:
+ tps = get_blockinfo_tps_with_types(algod, rounds=args.rounds, adir=adir)
+ else:
+ tps = get_blockinfo_tps(algod, rounds=args.rounds)
+ print('{:5.1f} TPS\t{}'.format(tps, adir))
return 0
if __name__ == '__main__':
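
Both TPS paths compute the same ratio: the change in the block header's transaction counter ("tc") divided by the change in block timestamps ("ts") across the round window; the mins/maxs helpers only exist to seed the window when a field is absent, and the new -T/--types flag additionally tallies transactions by type over that window. A worked example of the arithmetic, with invented numbers:

    # Hypothetical first and last blocks of a 45-second window:
    tca, tsa = 1000, 1660000000
    tcb, tsb = 1450, 1660000045
    tps = (tcb - tca) / (tsb - tsa)   # 450 txns / 45 s
    print('{:5.1f} TPS'.format(tps))  # -> " 10.0 TPS", same format as main()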
diff --git a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
index e2cc49790..c460542e4 100644
--- a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
+++ b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
@@ -38,6 +38,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS_AP_SOUTHEAST-2-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-2",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS_AP_SOUTHEAST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS_AP_SOUTHEAST-2-c5d.9xl",
"Provider": "AWS",
"Region": "ap-southeast-2",
@@ -81,6 +93,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-EU-NORTH-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "eu-north-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-NORTH-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-north-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-EU-NORTH-1-c5d.9xl",
"Provider": "AWS",
"Region": "eu-north-1",
@@ -123,6 +147,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-WEST-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "us-west-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-US-WEST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-west-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-US-WEST-1-c5d.9xl",
"Provider": "AWS",
"Region": "us-west-1",
@@ -159,6 +195,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-WEST-2-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "us-west-2",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-US-WEST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-west-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-US-WEST-2-c5d.9xl",
"Provider": "AWS",
"Region": "us-west-2",
@@ -201,6 +249,12 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-EAST-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "us-east-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
"Name": "AWS-US-EAST-1-c5d.4xl",
"Provider": "AWS",
"Region": "us-east-1",
@@ -255,6 +309,12 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-EAST-2-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "us-east-2",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
"Name": "AWS-US-EAST-2-c5d.4xl",
"Provider": "AWS",
"Region": "us-east-2",
@@ -339,6 +399,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-AP-SOUTH-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-south-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-SOUTH-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-south-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-AP-SOUTH-1-c5d.9xl",
"Provider": "AWS",
"Region": "ap-south-1",
@@ -375,6 +447,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-AP-SOUTHEAST-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-SOUTHEAST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-AP-SOUTHEAST-1-c5d.9xl",
"Provider": "AWS",
"Region": "ap-southeast-1",
@@ -411,6 +495,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-AP-SOUTHEAST-2-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-2",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-SOUTHEAST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-AP-SOUTHEAST-2-c5d.9xl",
"Provider": "AWS",
"Region": "ap-southeast-2",
@@ -537,6 +633,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-EU-CENTRAL-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "eu-central-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-CENTRAL-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-central-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-EU-CENTRAL-1-c5d.9xl",
"Provider": "AWS",
"Region": "eu-central-1",
@@ -609,6 +717,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-EU-WEST-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "eu-west-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-WEST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-west-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-EU-WEST-1-c5d.9xl",
"Provider": "AWS",
"Region": "eu-west-1",
@@ -645,6 +765,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-EU-WEST-2-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "eu-west-2",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-WEST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-west-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-EU-WEST-2-c5d.9xl",
"Provider": "AWS",
"Region": "eu-west-2",
@@ -681,6 +813,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-EU-WEST-3-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "eu-west-3",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-WEST-3-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-west-3",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-EU-WEST-3-c5d.9xl",
"Provider": "AWS",
"Region": "eu-west-3",
@@ -718,6 +862,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-SA-EAST-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "sa-east-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-SA-EAST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "sa-east-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-SA-EAST-1-c5d.9xl",
"Provider": "AWS",
"Region": "sa-east-1",
@@ -742,6 +898,18 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-CA-CENTRAL-1-c5d.2xl",
+ "Provider": "AWS",
+ "Region": "ca-central-1",
+ "BaseConfiguration": "c5d.2xlarge"
+ },
+ {
+ "Name": "AWS-CA-CENTRAL-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ca-central-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-CA-CENTRAL-1-c5d.9xl",
"Provider": "AWS",
"Region": "ca-central-1",
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
new file mode 100644
index 000000000..2a7d45039
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
@@ -0,0 +1,15 @@
+PARAMS=-w 20 -R 1 -N 20 -n 20 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: topology.json net.json genesis.json
+
+topology.json: gen_topology.py
+ python gen_topology.py
+
+net.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json topology.json
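
Running make from this recipe directory rebuilds everything: gen_topology.py emits topology.json, and netgoal (expected at ${GOPATH}/bin/netgoal, per the prerequisites above) regenerates net.json and genesis.json with the PARAMS flags; make clean removes all three so they can be rebuilt from scratch.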
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py
new file mode 100644
index 000000000..c95245647
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py
@@ -0,0 +1,31 @@
+import json
+import os
+
+node_types = {"R":1, "N":20, "NPN":1}
+node_size = {"R":"-c5d.4xl", "N":"-c5d.4xl", "NPN":"-c5d.4xl"}
+regions = [
+ "AWS-US-EAST-1",
+ "AWS-US-WEST-1",
+ "AWS-SA-EAST-1",
+ "AWS-EU-NORTH-1",
+ "AWS-AP-SOUTHEAST-1"
+]
+
+network = "alphanet"
+
+host_elements = []
+region_count = len(regions)
+for node_type in node_types.keys():
+ node_count = node_types[node_type]
+ region_size = node_size[node_type]
+ for i in range(node_count):
+ host = {}
+ node_name = node_type + str(i + 1) + "-" + network
+ region = regions[i % region_count]
+ host["Name"] = node_name
+ host["Template"] = region + region_size
+ host_elements.append(host)
+
+ec2_hosts = {"Hosts": host_elements}
+with open("topology.json", "w") as f:
+ f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep)
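
The round-robin assignment walks each node type and cycles through the five regions. For instance, the first relay host comes out as follows, matching the committed topology.json (a hand-evaluated excerpt, not extra code in the script):

    # i = 0, node_type = "R", regions[0 % 5] = "AWS-US-EAST-1"
    node_name = "R" + str(0 + 1) + "-" + "alphanet"   # "R1-alphanet"
    template = "AWS-US-EAST-1" + "-c5d.4xl"           # "AWS-US-EAST-1-c5d.4xl"
    host = {"Name": node_name, "Template": template}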
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json
new file mode 100644
index 000000000..0be2b3267
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json
@@ -0,0 +1,154 @@
+{
+ "NetworkName": "",
+ "VersionModifier": "",
+ "ConsensusProtocol": "alpha4",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 50000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 6.25,
+ "Online": false
+ }
+ ],
+ "FeeSink": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
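
The stake distribution is exact: the twenty online wallets and eight offline wallets each hold half the stake, so the total is 100%.

    # Sanity check on the extension genesis stakes:
    assert 20 * 2.5 + 8 * 6.25 == 100.0   # 50% online + 50% offline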
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/net.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/net.json
new file mode 100644
index 000000000..0e2afae8b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/net.json
@@ -0,0 +1,504 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N5-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N6-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N7-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N8-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N10-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/node.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/node.json
new file mode 100644
index 000000000..d3b429ee3
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/node.json
@@ -0,0 +1,10 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json
new file mode 100644
index 000000000..5b0a52d9d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json
new file mode 100644
index 000000000..cbf980c94
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json
@@ -0,0 +1,88 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N1-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N2-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N3-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N4-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N6-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N7-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N8-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N9-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N10-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N11-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N12-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N13-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N14-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N15-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N16-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N17-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N18-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N19-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N20-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
index 13130934d..4cb3c207d 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/Makefile
+++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 8 -R 1 -N 4 -n 8 -H 2 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 5 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
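
The new PARAMS (20 wallets, 5 relays, 20 participating hosts, 10 non-participating hosts) line up with the node_types counts that gen_topology.py adopts in the next hunk.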
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
index 7298256d8..ae4344210 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
+++ b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
@@ -1,15 +1,14 @@
import json
import os
-node_types = {"R":1, "N":4, "NPN":2}
-node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"}
+node_types = {"R":5, "N":20, "NPN":10}
+node_size = {"R":"-c5d.4xl", "N":"-c5d.4xl", "NPN":"-c5d.4xl"}
regions = [
- "AWS-US-EAST-2",
- "AWS-US-WEST-2",
- "AWS-EU-CENTRAL-1",
- "AWS-EU-WEST-2",
- "AWS-AP-SOUTHEAST-1",
- "AWS-AP-SOUTHEAST-2"
+ "AWS-US-EAST-1",
+ "AWS-US-WEST-1",
+ "AWS-SA-EAST-1",
+ "AWS-EU-NORTH-1",
+ "AWS-AP-SOUTHEAST-1"
]
network = "alphanet"
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
index 1d78dd782..d0c1b7e41 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
@@ -1,59 +1,159 @@
{
- "NetworkName": "alphanet",
+ "NetworkName": "",
"VersionModifier": "",
- "ConsensusProtocol": "alpha1",
+ "ConsensusProtocol": "alpha4",
"FirstPartKeyRound": 0,
- "LastPartKeyRound": 3000000,
+ "LastPartKeyRound": 50000,
"PartKeyDilution": 0,
"Wallets": [
{
"Name": "Wallet1",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet2",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet3",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet4",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet5",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet6",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet7",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet8",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet9",
- "Stake": 25,
- "Online": false
+ "Stake": 2.5,
+ "Online": true
},
{
"Name": "Wallet10",
- "Stake": 25,
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 5,
"Online": false
}
],
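
As with the extension recipe, the rebalanced stakes still total 100%: twenty online wallets at 2.5 plus ten offline wallets at 5.

    # Sanity check on the revised alphanet genesis stakes:
    assert 20 * 2.5 + 10 * 5 == 100.0   # 50% online + 50% offline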
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/net.json b/test/testdata/deployednettemplates/recipes/alphanet/net.json
index e75a91d29..0fed02432 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/net.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/net.json
@@ -21,6 +21,86 @@
]
},
{
+ "Name": "R2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R5-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
"Name": "N1-alphanet",
"Group": "",
"Nodes": [
@@ -41,15 +121,22 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N2-alphanet",
+ "Group": "",
+ "Nodes": [
{
- "Name": "node5",
+ "Name": "node2",
"Wallets": [
{
"Name": "Wallet2",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -62,11 +149,11 @@
]
},
{
- "Name": "N2-alphanet",
+ "Name": "N3-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "node2",
+ "Name": "node3",
"Wallets": [
{
"Name": "Wallet3",
@@ -82,15 +169,22 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N4-alphanet",
+ "Group": "",
+ "Nodes": [
{
- "Name": "node6",
+ "Name": "node4",
"Wallets": [
{
"Name": "Wallet4",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -103,11 +197,11 @@
]
},
{
- "Name": "N3-alphanet",
+ "Name": "N5-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "node3",
+ "Name": "node5",
"Wallets": [
{
"Name": "Wallet5",
@@ -123,15 +217,22 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N6-alphanet",
+ "Group": "",
+ "Nodes": [
{
- "Name": "node7",
+ "Name": "node6",
"Wallets": [
{
"Name": "Wallet6",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -144,11 +245,11 @@
]
},
{
- "Name": "N4-alphanet",
+ "Name": "N7-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "node4",
+ "Name": "node7",
"Wallets": [
{
"Name": "Wallet7",
@@ -164,7 +265,13 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N8-alphanet",
+ "Group": "",
+ "Nodes": [
{
"Name": "node8",
"Wallets": [
@@ -173,6 +280,295 @@
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N10-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -192,7 +588,7 @@
"Name": "nonParticipatingNode1",
"Wallets": [
{
- "Name": "Wallet9",
+ "Name": "Wallet21",
"ParticipationOnly": false
}
],
@@ -214,7 +610,183 @@
"Name": "nonParticipatingNode2",
"Wallets": [
{
- "Name": "Wallet10",
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
"ParticipationOnly": false
}
],
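
Each node entry above carries its algod settings as an escaped JSON string in "ConfigJSONOverride". Because the override is a string embedded inside JSON, a broken escape would only surface at deploy time; a minimal sketch, assuming the recipe path below, that parses every override up front:

import json

# Path is an assumption; adjust to wherever the repo is checked out.
with open("test/testdata/deployednettemplates/recipes/alphanet/net.json") as f:
    net = json.load(f)

for host in net["Hosts"]:
    for node in host["Nodes"]:
        override = node.get("ConfigJSONOverride")
        if override:
            # json.loads raises ValueError if the escaped string is malformed.
            cfg = json.loads(override)
            print(node["Name"], "->", len(cfg), "override keys")
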
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/topology.json b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
index 8760eae20..35cb3a098 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/topology.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
@@ -2,31 +2,143 @@
"Hosts": [
{
"Name": "R1-alphanet",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "R2-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "R3-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "R4-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "R5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
},
{
"Name": "N1-alphanet",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.4xl"
},
{
"Name": "N2-alphanet",
- "Template": "AWS-US-WEST-2-m5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.4xl"
},
{
"Name": "N3-alphanet",
- "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
},
{
"Name": "N4-alphanet",
- "Template": "AWS-EU-WEST-2-m5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N6-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N7-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N8-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N9-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N10-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N11-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N12-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N13-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N14-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N15-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N16-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N17-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N18-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N19-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N20-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
},
{
"Name": "NPN1-alphanet",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.4xl"
},
{
"Name": "NPN2-alphanet",
- "Template": "AWS-US-WEST-2-m5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN3-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN4-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN6-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN7-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN8-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN9-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN10-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
}
]
}
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile b/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
new file mode 100644
index 000000000..256d329a0
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
@@ -0,0 +1,15 @@
+PARAMS=-w 20 -R 5 -N 20 -n 20 -H 50 -X 500 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: topology.json net.json genesis.json
+
+topology.json: gen_topology.py
+ python gen_topology.py
+
+net.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json topology.json
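
The Makefile wires the recipe together: gen_topology.py emits topology.json, while netgoal renders net.json and genesis.json from the node/relay/non-participating templates. The flag semantics assumed here (-R relays, -N node hosts, -H non-participating-node hosts, -w participating wallets, -X extra non-participating accounts) line up with the host counts in gen_topology.py below; a hedged sketch that cross-checks the generated topology against those counts:

import json

# Assumes it runs from the recipe directory after `make topology.json`.
with open("topology.json") as f:
    hosts = json.load(f)["Hosts"]

by_role = {"R": 0, "N": 0, "NPN": 0}
for h in hosts:
    for role in ("NPN", "N", "R"):  # test NPN first: "NPN1" also starts with "N"
        if h["Name"].startswith(role):
            by_role[role] += 1
            break

assert by_role == {"R": 5, "N": 20, "NPN": 50}, by_role
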
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/gen_topology.py b/test/testdata/deployednettemplates/recipes/betanet-model-2/gen_topology.py
new file mode 100644
index 000000000..f04d70472
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/gen_topology.py
@@ -0,0 +1,31 @@
+import json
+import os
+
+node_types = {"R":5, "N":20, "NPN":50}
+node_size = {"R":"-c5d.4xl", "N":"-c5d.2xl", "NPN":"-Small"}
+regions = [
+ "AWS-US-EAST-1",
+ "AWS-US-WEST-1",
+ "AWS-SA-EAST-1",
+ "AWS-EU-NORTH-1",
+ "AWS-AP-SOUTHEAST-1"
+]
+
+network = "betanet-model-2"
+
+host_elements = []
+region_count = len(regions)
+for node_type in node_types.keys():
+ node_count = node_types[node_type]
+ region_size = node_size[node_type]
+ for i in range(node_count):
+ host = {}
+ node_name = node_type + str(i + 1) + "-" + network
+ region = regions[i % region_count]
+ host["Name"] = node_name
+ host["Template"] = region + region_size
+ host_elements.append(host)
+
+ec2_hosts = {"Hosts": host_elements}
+with open("topology.json", "w") as f:
+ f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep)
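
gen_topology.py assigns each role's hosts to the five regions round-robin via i % region_count, so every region ends up with 1 relay, 4 nodes, and 10 NPN hosts, 15 in total. A small sketch to confirm that balance (one caveat worth noting: os.linesep written to a text-mode file can double the carriage return on Windows, so a literal "\n" would be the safer terminator):

import json
from collections import Counter

with open("topology.json") as f:
    hosts = json.load(f)["Hosts"]

# Templates look like "AWS-US-EAST-1-c5d.4xl" or "AWS-US-EAST-1-Small";
# the last "-" separates the region from the instance size in both cases.
per_region = Counter(h["Template"].rsplit("-", 1)[0] for h in hosts)

# 5 R + 20 N + 50 NPN hosts spread over 5 regions -> 15 hosts per region.
assert all(n == 15 for n in per_region.values()), per_region
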
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/genesis.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/genesis.json
new file mode 100644
index 000000000..c42024986
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/genesis.json
@@ -0,0 +1,2614 @@
+{
+ "NetworkName": "betanet-model-2",
+ "VersionModifier": "",
+ "ConsensusProtocol": "future",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 50000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet31",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet32",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet33",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet34",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet35",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet36",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet37",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet38",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet39",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet40",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet41",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet42",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet43",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet44",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet45",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet46",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet47",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet48",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet49",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet50",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet51",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet52",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet53",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet54",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet55",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet56",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet57",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet58",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet59",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet60",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet61",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet62",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet63",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet64",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet65",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet66",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet67",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet68",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet69",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet70",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet71",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet72",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet73",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet74",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet75",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet76",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet77",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet78",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet79",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet80",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet81",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet82",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet83",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet84",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet85",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet86",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet87",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet88",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet89",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet90",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet91",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet92",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet93",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet94",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet95",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet96",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet97",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet98",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet99",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet100",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet101",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet102",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet103",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet104",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet105",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet106",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet107",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet108",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet109",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet110",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet111",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet112",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet113",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet114",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet115",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet116",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet117",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet118",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet119",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet120",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet121",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet122",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet123",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet124",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet125",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet126",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet127",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet128",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet129",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet130",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet131",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet132",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet133",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet134",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet135",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet136",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet137",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet138",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet139",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet140",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet141",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet142",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet143",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet144",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet145",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet146",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet147",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet148",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet149",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet150",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet151",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet152",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet153",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet154",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet155",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet156",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet157",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet158",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet159",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet160",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet161",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet162",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet163",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet164",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet165",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet166",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet167",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet168",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet169",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet170",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet171",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet172",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet173",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet174",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet175",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet176",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet177",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet178",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet179",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet180",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet181",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet182",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet183",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet184",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet185",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet186",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet187",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet188",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet189",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet190",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet191",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet192",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet193",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet194",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet195",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet196",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet197",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet198",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet199",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet200",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet201",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet202",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet203",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet204",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet205",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet206",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet207",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet208",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet209",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet210",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet211",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet212",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet213",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet214",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet215",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet216",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet217",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet218",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet219",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet220",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet221",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet222",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet223",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet224",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet225",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet226",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet227",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet228",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet229",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet230",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet231",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet232",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet233",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet234",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet235",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet236",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet237",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet238",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet239",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet240",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet241",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet242",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet243",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet244",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet245",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet246",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet247",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet248",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet249",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet250",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet251",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet252",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet253",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet254",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet255",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet256",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet257",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet258",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet259",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet260",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet261",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet262",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet263",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet264",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet265",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet266",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet267",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet268",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet269",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet270",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet271",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet272",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet273",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet274",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet275",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet276",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet277",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet278",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet279",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet280",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet281",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet282",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet283",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet284",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet285",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet286",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet287",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet288",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet289",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet290",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet291",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet292",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet293",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet294",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet295",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet296",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet297",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet298",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet299",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet300",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet301",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet302",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet303",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet304",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet305",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet306",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet307",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet308",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet309",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet310",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet311",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet312",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet313",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet314",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet315",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet316",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet317",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet318",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet319",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet320",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet321",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet322",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet323",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet324",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet325",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet326",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet327",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet328",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet329",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet330",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet331",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet332",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet333",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet334",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet335",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet336",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet337",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet338",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet339",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet340",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet341",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet342",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet343",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet344",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet345",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet346",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet347",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet348",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet349",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet350",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet351",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet352",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet353",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet354",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet355",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet356",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet357",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet358",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet359",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet360",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet361",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet362",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet363",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet364",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet365",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet366",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet367",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet368",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet369",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet370",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet371",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet372",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet373",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet374",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet375",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet376",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet377",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet378",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet379",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet380",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet381",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet382",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet383",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet384",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet385",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet386",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet387",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet388",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet389",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet390",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet391",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet392",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet393",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet394",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet395",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet396",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet397",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet398",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet399",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet400",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet401",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet402",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet403",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet404",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet405",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet406",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet407",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet408",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet409",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet410",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet411",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet412",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet413",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet414",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet415",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet416",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet417",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet418",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet419",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet420",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet421",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet422",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet423",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet424",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet425",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet426",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet427",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet428",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet429",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet430",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet431",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet432",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet433",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet434",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet435",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet436",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet437",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet438",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet439",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet440",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet441",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet442",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet443",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet444",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet445",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet446",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet447",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet448",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet449",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet450",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet451",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet452",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet453",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet454",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet455",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet456",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet457",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet458",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet459",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet460",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet461",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet462",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet463",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet464",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet465",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet466",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet467",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet468",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet469",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet470",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet471",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet472",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet473",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet474",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet475",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet476",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet477",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet478",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet479",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet480",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet481",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet482",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet483",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet484",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet485",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet486",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet487",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet488",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet489",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet490",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet491",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet492",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet493",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet494",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet495",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet496",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet497",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet498",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet499",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet500",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet501",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet502",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet503",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet504",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet505",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet506",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet507",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet508",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet509",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet510",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet511",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet512",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet513",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet514",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet515",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet516",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet517",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet518",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet519",
+ "Stake": 0.1,
+ "Online": false
+ },
+ {
+ "Name": "Wallet520",
+ "Stake": 0.1,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
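
The wallet list above encodes the model's stake split: Wallet1 through Wallet20 hold 2.5 each and are online, while Wallet21 through Wallet520 hold 0.1 each and are offline, so participating stake is exactly half the total. A minimal sketch (path assumed relative to the recipe directory) to sanity-check the totals, using math.isclose because summing five hundred 0.1s accumulates floating-point error:

import json
import math

with open("genesis.json") as f:
    wallets = json.load(f)["Wallets"]

online = sum(w["Stake"] for w in wallets if w["Online"])
offline = sum(w["Stake"] for w in wallets if not w["Online"])

# 20 * 2.5 == 50.0 exactly; the 0.1 sum only approximately, hence isclose.
assert math.isclose(online, 50.0) and math.isclose(offline, 50.0)
assert math.isclose(online + offline, 100.0)
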
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/net.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/net.json
new file mode 100644
index 000000000..5716de0f6
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/net.json
@@ -0,0 +1,8434 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R2-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R5-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N5-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N6-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N7-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N8-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N10-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet21",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode51",
+ "Wallets": [
+ {
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode101",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode151",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode201",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode251",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode301",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode351",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode401",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode451",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet31",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode52",
+ "Wallets": [
+ {
+ "Name": "Wallet32",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode102",
+ "Wallets": [
+ {
+ "Name": "Wallet33",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode152",
+ "Wallets": [
+ {
+ "Name": "Wallet34",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode202",
+ "Wallets": [
+ {
+ "Name": "Wallet35",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode252",
+ "Wallets": [
+ {
+ "Name": "Wallet36",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode302",
+ "Wallets": [
+ {
+ "Name": "Wallet37",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode352",
+ "Wallets": [
+ {
+ "Name": "Wallet38",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode402",
+ "Wallets": [
+ {
+ "Name": "Wallet39",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode452",
+ "Wallets": [
+ {
+ "Name": "Wallet40",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet41",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode53",
+ "Wallets": [
+ {
+ "Name": "Wallet42",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode103",
+ "Wallets": [
+ {
+ "Name": "Wallet43",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode153",
+ "Wallets": [
+ {
+ "Name": "Wallet44",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode203",
+ "Wallets": [
+ {
+ "Name": "Wallet45",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode253",
+ "Wallets": [
+ {
+ "Name": "Wallet46",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode303",
+ "Wallets": [
+ {
+ "Name": "Wallet47",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode353",
+ "Wallets": [
+ {
+ "Name": "Wallet48",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode403",
+ "Wallets": [
+ {
+ "Name": "Wallet49",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode453",
+ "Wallets": [
+ {
+ "Name": "Wallet50",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet51",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode54",
+ "Wallets": [
+ {
+ "Name": "Wallet52",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode104",
+ "Wallets": [
+ {
+ "Name": "Wallet53",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode154",
+ "Wallets": [
+ {
+ "Name": "Wallet54",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode204",
+ "Wallets": [
+ {
+ "Name": "Wallet55",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode254",
+ "Wallets": [
+ {
+ "Name": "Wallet56",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode304",
+ "Wallets": [
+ {
+ "Name": "Wallet57",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode354",
+ "Wallets": [
+ {
+ "Name": "Wallet58",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode404",
+ "Wallets": [
+ {
+ "Name": "Wallet59",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode454",
+ "Wallets": [
+ {
+ "Name": "Wallet60",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet61",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode55",
+ "Wallets": [
+ {
+ "Name": "Wallet62",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode105",
+ "Wallets": [
+ {
+ "Name": "Wallet63",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode155",
+ "Wallets": [
+ {
+ "Name": "Wallet64",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode205",
+ "Wallets": [
+ {
+ "Name": "Wallet65",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode255",
+ "Wallets": [
+ {
+ "Name": "Wallet66",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode305",
+ "Wallets": [
+ {
+ "Name": "Wallet67",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode355",
+ "Wallets": [
+ {
+ "Name": "Wallet68",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode405",
+ "Wallets": [
+ {
+ "Name": "Wallet69",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode455",
+ "Wallets": [
+ {
+ "Name": "Wallet70",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet71",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode56",
+ "Wallets": [
+ {
+ "Name": "Wallet72",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode106",
+ "Wallets": [
+ {
+ "Name": "Wallet73",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode156",
+ "Wallets": [
+ {
+ "Name": "Wallet74",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode206",
+ "Wallets": [
+ {
+ "Name": "Wallet75",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode256",
+ "Wallets": [
+ {
+ "Name": "Wallet76",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode306",
+ "Wallets": [
+ {
+ "Name": "Wallet77",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode356",
+ "Wallets": [
+ {
+ "Name": "Wallet78",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode406",
+ "Wallets": [
+ {
+ "Name": "Wallet79",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode456",
+ "Wallets": [
+ {
+ "Name": "Wallet80",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet81",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode57",
+ "Wallets": [
+ {
+ "Name": "Wallet82",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode107",
+ "Wallets": [
+ {
+ "Name": "Wallet83",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode157",
+ "Wallets": [
+ {
+ "Name": "Wallet84",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode207",
+ "Wallets": [
+ {
+ "Name": "Wallet85",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode257",
+ "Wallets": [
+ {
+ "Name": "Wallet86",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode307",
+ "Wallets": [
+ {
+ "Name": "Wallet87",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode357",
+ "Wallets": [
+ {
+ "Name": "Wallet88",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode407",
+ "Wallets": [
+ {
+ "Name": "Wallet89",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode457",
+ "Wallets": [
+ {
+ "Name": "Wallet90",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet91",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode58",
+ "Wallets": [
+ {
+ "Name": "Wallet92",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode108",
+ "Wallets": [
+ {
+ "Name": "Wallet93",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode158",
+ "Wallets": [
+ {
+ "Name": "Wallet94",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode208",
+ "Wallets": [
+ {
+ "Name": "Wallet95",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode258",
+ "Wallets": [
+ {
+ "Name": "Wallet96",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode308",
+ "Wallets": [
+ {
+ "Name": "Wallet97",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode358",
+ "Wallets": [
+ {
+ "Name": "Wallet98",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode408",
+ "Wallets": [
+ {
+ "Name": "Wallet99",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode458",
+ "Wallets": [
+ {
+ "Name": "Wallet100",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet101",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode59",
+ "Wallets": [
+ {
+ "Name": "Wallet102",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode109",
+ "Wallets": [
+ {
+ "Name": "Wallet103",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode159",
+ "Wallets": [
+ {
+ "Name": "Wallet104",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode209",
+ "Wallets": [
+ {
+ "Name": "Wallet105",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode259",
+ "Wallets": [
+ {
+ "Name": "Wallet106",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode309",
+ "Wallets": [
+ {
+ "Name": "Wallet107",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode359",
+ "Wallets": [
+ {
+ "Name": "Wallet108",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode409",
+ "Wallets": [
+ {
+ "Name": "Wallet109",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode459",
+ "Wallets": [
+ {
+ "Name": "Wallet110",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet111",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode60",
+ "Wallets": [
+ {
+ "Name": "Wallet112",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode110",
+ "Wallets": [
+ {
+ "Name": "Wallet113",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode160",
+ "Wallets": [
+ {
+ "Name": "Wallet114",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode210",
+ "Wallets": [
+ {
+ "Name": "Wallet115",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode260",
+ "Wallets": [
+ {
+ "Name": "Wallet116",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode310",
+ "Wallets": [
+ {
+ "Name": "Wallet117",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode360",
+ "Wallets": [
+ {
+ "Name": "Wallet118",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode410",
+ "Wallets": [
+ {
+ "Name": "Wallet119",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode460",
+ "Wallets": [
+ {
+ "Name": "Wallet120",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN11-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode11",
+ "Wallets": [
+ {
+ "Name": "Wallet121",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode61",
+ "Wallets": [
+ {
+ "Name": "Wallet122",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode111",
+ "Wallets": [
+ {
+ "Name": "Wallet123",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode161",
+ "Wallets": [
+ {
+ "Name": "Wallet124",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode211",
+ "Wallets": [
+ {
+ "Name": "Wallet125",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode261",
+ "Wallets": [
+ {
+ "Name": "Wallet126",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode311",
+ "Wallets": [
+ {
+ "Name": "Wallet127",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode361",
+ "Wallets": [
+ {
+ "Name": "Wallet128",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode411",
+ "Wallets": [
+ {
+ "Name": "Wallet129",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode461",
+ "Wallets": [
+ {
+ "Name": "Wallet130",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN12-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode12",
+ "Wallets": [
+ {
+ "Name": "Wallet131",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode62",
+ "Wallets": [
+ {
+ "Name": "Wallet132",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode112",
+ "Wallets": [
+ {
+ "Name": "Wallet133",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode162",
+ "Wallets": [
+ {
+ "Name": "Wallet134",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode212",
+ "Wallets": [
+ {
+ "Name": "Wallet135",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode262",
+ "Wallets": [
+ {
+ "Name": "Wallet136",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode312",
+ "Wallets": [
+ {
+ "Name": "Wallet137",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode362",
+ "Wallets": [
+ {
+ "Name": "Wallet138",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode412",
+ "Wallets": [
+ {
+ "Name": "Wallet139",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode462",
+ "Wallets": [
+ {
+ "Name": "Wallet140",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN13-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode13",
+ "Wallets": [
+ {
+ "Name": "Wallet141",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode63",
+ "Wallets": [
+ {
+ "Name": "Wallet142",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode113",
+ "Wallets": [
+ {
+ "Name": "Wallet143",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode163",
+ "Wallets": [
+ {
+ "Name": "Wallet144",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode213",
+ "Wallets": [
+ {
+ "Name": "Wallet145",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode263",
+ "Wallets": [
+ {
+ "Name": "Wallet146",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode313",
+ "Wallets": [
+ {
+ "Name": "Wallet147",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode363",
+ "Wallets": [
+ {
+ "Name": "Wallet148",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode413",
+ "Wallets": [
+ {
+ "Name": "Wallet149",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode463",
+ "Wallets": [
+ {
+ "Name": "Wallet150",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN14-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode14",
+ "Wallets": [
+ {
+ "Name": "Wallet151",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode64",
+ "Wallets": [
+ {
+ "Name": "Wallet152",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode114",
+ "Wallets": [
+ {
+ "Name": "Wallet153",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode164",
+ "Wallets": [
+ {
+ "Name": "Wallet154",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode214",
+ "Wallets": [
+ {
+ "Name": "Wallet155",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode264",
+ "Wallets": [
+ {
+ "Name": "Wallet156",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode314",
+ "Wallets": [
+ {
+ "Name": "Wallet157",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode364",
+ "Wallets": [
+ {
+ "Name": "Wallet158",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode414",
+ "Wallets": [
+ {
+ "Name": "Wallet159",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode464",
+ "Wallets": [
+ {
+ "Name": "Wallet160",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN15-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode15",
+ "Wallets": [
+ {
+ "Name": "Wallet161",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode65",
+ "Wallets": [
+ {
+ "Name": "Wallet162",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode115",
+ "Wallets": [
+ {
+ "Name": "Wallet163",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode165",
+ "Wallets": [
+ {
+ "Name": "Wallet164",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode215",
+ "Wallets": [
+ {
+ "Name": "Wallet165",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode265",
+ "Wallets": [
+ {
+ "Name": "Wallet166",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode315",
+ "Wallets": [
+ {
+ "Name": "Wallet167",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode365",
+ "Wallets": [
+ {
+ "Name": "Wallet168",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode415",
+ "Wallets": [
+ {
+ "Name": "Wallet169",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode465",
+ "Wallets": [
+ {
+ "Name": "Wallet170",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN16-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode16",
+ "Wallets": [
+ {
+ "Name": "Wallet171",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode66",
+ "Wallets": [
+ {
+ "Name": "Wallet172",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode116",
+ "Wallets": [
+ {
+ "Name": "Wallet173",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode166",
+ "Wallets": [
+ {
+ "Name": "Wallet174",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode216",
+ "Wallets": [
+ {
+ "Name": "Wallet175",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode266",
+ "Wallets": [
+ {
+ "Name": "Wallet176",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode316",
+ "Wallets": [
+ {
+ "Name": "Wallet177",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode366",
+ "Wallets": [
+ {
+ "Name": "Wallet178",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode416",
+ "Wallets": [
+ {
+ "Name": "Wallet179",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode466",
+ "Wallets": [
+ {
+ "Name": "Wallet180",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN17-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode17",
+ "Wallets": [
+ {
+ "Name": "Wallet181",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode67",
+ "Wallets": [
+ {
+ "Name": "Wallet182",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode117",
+ "Wallets": [
+ {
+ "Name": "Wallet183",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode167",
+ "Wallets": [
+ {
+ "Name": "Wallet184",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode217",
+ "Wallets": [
+ {
+ "Name": "Wallet185",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode267",
+ "Wallets": [
+ {
+ "Name": "Wallet186",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode317",
+ "Wallets": [
+ {
+ "Name": "Wallet187",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode367",
+ "Wallets": [
+ {
+ "Name": "Wallet188",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode417",
+ "Wallets": [
+ {
+ "Name": "Wallet189",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode467",
+ "Wallets": [
+ {
+ "Name": "Wallet190",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN18-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode18",
+ "Wallets": [
+ {
+ "Name": "Wallet191",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode68",
+ "Wallets": [
+ {
+ "Name": "Wallet192",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode118",
+ "Wallets": [
+ {
+ "Name": "Wallet193",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode168",
+ "Wallets": [
+ {
+ "Name": "Wallet194",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode218",
+ "Wallets": [
+ {
+ "Name": "Wallet195",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode268",
+ "Wallets": [
+ {
+ "Name": "Wallet196",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode318",
+ "Wallets": [
+ {
+ "Name": "Wallet197",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode368",
+ "Wallets": [
+ {
+ "Name": "Wallet198",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode418",
+ "Wallets": [
+ {
+ "Name": "Wallet199",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode468",
+ "Wallets": [
+ {
+ "Name": "Wallet200",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN19-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode19",
+ "Wallets": [
+ {
+ "Name": "Wallet201",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode69",
+ "Wallets": [
+ {
+ "Name": "Wallet202",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode119",
+ "Wallets": [
+ {
+ "Name": "Wallet203",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode169",
+ "Wallets": [
+ {
+ "Name": "Wallet204",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode219",
+ "Wallets": [
+ {
+ "Name": "Wallet205",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode269",
+ "Wallets": [
+ {
+ "Name": "Wallet206",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode319",
+ "Wallets": [
+ {
+ "Name": "Wallet207",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode369",
+ "Wallets": [
+ {
+ "Name": "Wallet208",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode419",
+ "Wallets": [
+ {
+ "Name": "Wallet209",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode469",
+ "Wallets": [
+ {
+ "Name": "Wallet210",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN20-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode20",
+ "Wallets": [
+ {
+ "Name": "Wallet211",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode70",
+ "Wallets": [
+ {
+ "Name": "Wallet212",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode120",
+ "Wallets": [
+ {
+ "Name": "Wallet213",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode170",
+ "Wallets": [
+ {
+ "Name": "Wallet214",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode220",
+ "Wallets": [
+ {
+ "Name": "Wallet215",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode270",
+ "Wallets": [
+ {
+ "Name": "Wallet216",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode320",
+ "Wallets": [
+ {
+ "Name": "Wallet217",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode370",
+ "Wallets": [
+ {
+ "Name": "Wallet218",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode420",
+ "Wallets": [
+ {
+ "Name": "Wallet219",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode470",
+ "Wallets": [
+ {
+ "Name": "Wallet220",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN21-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode21",
+ "Wallets": [
+ {
+ "Name": "Wallet221",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode71",
+ "Wallets": [
+ {
+ "Name": "Wallet222",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode121",
+ "Wallets": [
+ {
+ "Name": "Wallet223",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode171",
+ "Wallets": [
+ {
+ "Name": "Wallet224",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode221",
+ "Wallets": [
+ {
+ "Name": "Wallet225",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode271",
+ "Wallets": [
+ {
+ "Name": "Wallet226",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode321",
+ "Wallets": [
+ {
+ "Name": "Wallet227",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode371",
+ "Wallets": [
+ {
+ "Name": "Wallet228",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode421",
+ "Wallets": [
+ {
+ "Name": "Wallet229",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode471",
+ "Wallets": [
+ {
+ "Name": "Wallet230",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN22-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode22",
+ "Wallets": [
+ {
+ "Name": "Wallet231",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode72",
+ "Wallets": [
+ {
+ "Name": "Wallet232",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode122",
+ "Wallets": [
+ {
+ "Name": "Wallet233",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode172",
+ "Wallets": [
+ {
+ "Name": "Wallet234",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode222",
+ "Wallets": [
+ {
+ "Name": "Wallet235",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode272",
+ "Wallets": [
+ {
+ "Name": "Wallet236",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode322",
+ "Wallets": [
+ {
+ "Name": "Wallet237",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode372",
+ "Wallets": [
+ {
+ "Name": "Wallet238",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode422",
+ "Wallets": [
+ {
+ "Name": "Wallet239",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode472",
+ "Wallets": [
+ {
+ "Name": "Wallet240",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN23-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode23",
+ "Wallets": [
+ {
+ "Name": "Wallet241",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode73",
+ "Wallets": [
+ {
+ "Name": "Wallet242",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode123",
+ "Wallets": [
+ {
+ "Name": "Wallet243",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode173",
+ "Wallets": [
+ {
+ "Name": "Wallet244",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode223",
+ "Wallets": [
+ {
+ "Name": "Wallet245",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode273",
+ "Wallets": [
+ {
+ "Name": "Wallet246",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode323",
+ "Wallets": [
+ {
+ "Name": "Wallet247",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode373",
+ "Wallets": [
+ {
+ "Name": "Wallet248",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode423",
+ "Wallets": [
+ {
+ "Name": "Wallet249",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode473",
+ "Wallets": [
+ {
+ "Name": "Wallet250",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN24-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode24",
+ "Wallets": [
+ {
+ "Name": "Wallet251",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode74",
+ "Wallets": [
+ {
+ "Name": "Wallet252",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode124",
+ "Wallets": [
+ {
+ "Name": "Wallet253",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode174",
+ "Wallets": [
+ {
+ "Name": "Wallet254",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode224",
+ "Wallets": [
+ {
+ "Name": "Wallet255",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode274",
+ "Wallets": [
+ {
+ "Name": "Wallet256",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode324",
+ "Wallets": [
+ {
+ "Name": "Wallet257",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode374",
+ "Wallets": [
+ {
+ "Name": "Wallet258",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode424",
+ "Wallets": [
+ {
+ "Name": "Wallet259",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode474",
+ "Wallets": [
+ {
+ "Name": "Wallet260",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN25-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode25",
+ "Wallets": [
+ {
+ "Name": "Wallet261",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode75",
+ "Wallets": [
+ {
+ "Name": "Wallet262",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode125",
+ "Wallets": [
+ {
+ "Name": "Wallet263",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode175",
+ "Wallets": [
+ {
+ "Name": "Wallet264",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode225",
+ "Wallets": [
+ {
+ "Name": "Wallet265",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode275",
+ "Wallets": [
+ {
+ "Name": "Wallet266",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode325",
+ "Wallets": [
+ {
+ "Name": "Wallet267",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode375",
+ "Wallets": [
+ {
+ "Name": "Wallet268",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode425",
+ "Wallets": [
+ {
+ "Name": "Wallet269",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode475",
+ "Wallets": [
+ {
+ "Name": "Wallet270",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN26-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode26",
+ "Wallets": [
+ {
+ "Name": "Wallet271",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode76",
+ "Wallets": [
+ {
+ "Name": "Wallet272",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode126",
+ "Wallets": [
+ {
+ "Name": "Wallet273",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode176",
+ "Wallets": [
+ {
+ "Name": "Wallet274",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode226",
+ "Wallets": [
+ {
+ "Name": "Wallet275",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode276",
+ "Wallets": [
+ {
+ "Name": "Wallet276",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode326",
+ "Wallets": [
+ {
+ "Name": "Wallet277",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode376",
+ "Wallets": [
+ {
+ "Name": "Wallet278",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode426",
+ "Wallets": [
+ {
+ "Name": "Wallet279",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode476",
+ "Wallets": [
+ {
+ "Name": "Wallet280",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN27-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode27",
+ "Wallets": [
+ {
+ "Name": "Wallet281",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode77",
+ "Wallets": [
+ {
+ "Name": "Wallet282",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode127",
+ "Wallets": [
+ {
+ "Name": "Wallet283",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode177",
+ "Wallets": [
+ {
+ "Name": "Wallet284",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode227",
+ "Wallets": [
+ {
+ "Name": "Wallet285",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode277",
+ "Wallets": [
+ {
+ "Name": "Wallet286",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode327",
+ "Wallets": [
+ {
+ "Name": "Wallet287",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode377",
+ "Wallets": [
+ {
+ "Name": "Wallet288",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode427",
+ "Wallets": [
+ {
+ "Name": "Wallet289",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode477",
+ "Wallets": [
+ {
+ "Name": "Wallet290",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN28-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode28",
+ "Wallets": [
+ {
+ "Name": "Wallet291",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode78",
+ "Wallets": [
+ {
+ "Name": "Wallet292",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode128",
+ "Wallets": [
+ {
+ "Name": "Wallet293",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode178",
+ "Wallets": [
+ {
+ "Name": "Wallet294",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode228",
+ "Wallets": [
+ {
+ "Name": "Wallet295",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode278",
+ "Wallets": [
+ {
+ "Name": "Wallet296",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode328",
+ "Wallets": [
+ {
+ "Name": "Wallet297",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode378",
+ "Wallets": [
+ {
+ "Name": "Wallet298",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode428",
+ "Wallets": [
+ {
+ "Name": "Wallet299",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode478",
+ "Wallets": [
+ {
+ "Name": "Wallet300",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN29-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode29",
+ "Wallets": [
+ {
+ "Name": "Wallet301",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode79",
+ "Wallets": [
+ {
+ "Name": "Wallet302",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode129",
+ "Wallets": [
+ {
+ "Name": "Wallet303",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode179",
+ "Wallets": [
+ {
+ "Name": "Wallet304",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode229",
+ "Wallets": [
+ {
+ "Name": "Wallet305",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode279",
+ "Wallets": [
+ {
+ "Name": "Wallet306",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode329",
+ "Wallets": [
+ {
+ "Name": "Wallet307",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode379",
+ "Wallets": [
+ {
+ "Name": "Wallet308",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode429",
+ "Wallets": [
+ {
+ "Name": "Wallet309",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode479",
+ "Wallets": [
+ {
+ "Name": "Wallet310",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN30-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode30",
+ "Wallets": [
+ {
+ "Name": "Wallet311",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode80",
+ "Wallets": [
+ {
+ "Name": "Wallet312",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode130",
+ "Wallets": [
+ {
+ "Name": "Wallet313",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode180",
+ "Wallets": [
+ {
+ "Name": "Wallet314",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode230",
+ "Wallets": [
+ {
+ "Name": "Wallet315",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode280",
+ "Wallets": [
+ {
+ "Name": "Wallet316",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode330",
+ "Wallets": [
+ {
+ "Name": "Wallet317",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode380",
+ "Wallets": [
+ {
+ "Name": "Wallet318",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode430",
+ "Wallets": [
+ {
+ "Name": "Wallet319",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode480",
+ "Wallets": [
+ {
+ "Name": "Wallet320",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN31-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode31",
+ "Wallets": [
+ {
+ "Name": "Wallet321",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode81",
+ "Wallets": [
+ {
+ "Name": "Wallet322",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode131",
+ "Wallets": [
+ {
+ "Name": "Wallet323",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode181",
+ "Wallets": [
+ {
+ "Name": "Wallet324",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode231",
+ "Wallets": [
+ {
+ "Name": "Wallet325",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode281",
+ "Wallets": [
+ {
+ "Name": "Wallet326",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode331",
+ "Wallets": [
+ {
+ "Name": "Wallet327",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode381",
+ "Wallets": [
+ {
+ "Name": "Wallet328",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode431",
+ "Wallets": [
+ {
+ "Name": "Wallet329",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode481",
+ "Wallets": [
+ {
+ "Name": "Wallet330",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN32-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode32",
+ "Wallets": [
+ {
+ "Name": "Wallet331",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode82",
+ "Wallets": [
+ {
+ "Name": "Wallet332",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode132",
+ "Wallets": [
+ {
+ "Name": "Wallet333",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode182",
+ "Wallets": [
+ {
+ "Name": "Wallet334",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode232",
+ "Wallets": [
+ {
+ "Name": "Wallet335",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode282",
+ "Wallets": [
+ {
+ "Name": "Wallet336",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode332",
+ "Wallets": [
+ {
+ "Name": "Wallet337",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode382",
+ "Wallets": [
+ {
+ "Name": "Wallet338",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode432",
+ "Wallets": [
+ {
+ "Name": "Wallet339",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode482",
+ "Wallets": [
+ {
+ "Name": "Wallet340",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN33-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode33",
+ "Wallets": [
+ {
+ "Name": "Wallet341",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode83",
+ "Wallets": [
+ {
+ "Name": "Wallet342",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode133",
+ "Wallets": [
+ {
+ "Name": "Wallet343",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode183",
+ "Wallets": [
+ {
+ "Name": "Wallet344",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode233",
+ "Wallets": [
+ {
+ "Name": "Wallet345",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode283",
+ "Wallets": [
+ {
+ "Name": "Wallet346",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode333",
+ "Wallets": [
+ {
+ "Name": "Wallet347",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode383",
+ "Wallets": [
+ {
+ "Name": "Wallet348",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode433",
+ "Wallets": [
+ {
+ "Name": "Wallet349",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode483",
+ "Wallets": [
+ {
+ "Name": "Wallet350",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN34-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode34",
+ "Wallets": [
+ {
+ "Name": "Wallet351",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode84",
+ "Wallets": [
+ {
+ "Name": "Wallet352",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode134",
+ "Wallets": [
+ {
+ "Name": "Wallet353",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode184",
+ "Wallets": [
+ {
+ "Name": "Wallet354",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode234",
+ "Wallets": [
+ {
+ "Name": "Wallet355",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode284",
+ "Wallets": [
+ {
+ "Name": "Wallet356",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode334",
+ "Wallets": [
+ {
+ "Name": "Wallet357",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode384",
+ "Wallets": [
+ {
+ "Name": "Wallet358",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode434",
+ "Wallets": [
+ {
+ "Name": "Wallet359",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode484",
+ "Wallets": [
+ {
+ "Name": "Wallet360",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN35-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode35",
+ "Wallets": [
+ {
+ "Name": "Wallet361",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode85",
+ "Wallets": [
+ {
+ "Name": "Wallet362",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode135",
+ "Wallets": [
+ {
+ "Name": "Wallet363",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode185",
+ "Wallets": [
+ {
+ "Name": "Wallet364",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode235",
+ "Wallets": [
+ {
+ "Name": "Wallet365",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode285",
+ "Wallets": [
+ {
+ "Name": "Wallet366",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode335",
+ "Wallets": [
+ {
+ "Name": "Wallet367",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode385",
+ "Wallets": [
+ {
+ "Name": "Wallet368",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode435",
+ "Wallets": [
+ {
+ "Name": "Wallet369",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode485",
+ "Wallets": [
+ {
+ "Name": "Wallet370",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN36-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode36",
+ "Wallets": [
+ {
+ "Name": "Wallet371",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode86",
+ "Wallets": [
+ {
+ "Name": "Wallet372",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode136",
+ "Wallets": [
+ {
+ "Name": "Wallet373",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode186",
+ "Wallets": [
+ {
+ "Name": "Wallet374",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode236",
+ "Wallets": [
+ {
+ "Name": "Wallet375",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode286",
+ "Wallets": [
+ {
+ "Name": "Wallet376",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode336",
+ "Wallets": [
+ {
+ "Name": "Wallet377",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode386",
+ "Wallets": [
+ {
+ "Name": "Wallet378",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode436",
+ "Wallets": [
+ {
+ "Name": "Wallet379",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode486",
+ "Wallets": [
+ {
+ "Name": "Wallet380",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN37-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode37",
+ "Wallets": [
+ {
+ "Name": "Wallet381",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode87",
+ "Wallets": [
+ {
+ "Name": "Wallet382",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode137",
+ "Wallets": [
+ {
+ "Name": "Wallet383",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode187",
+ "Wallets": [
+ {
+ "Name": "Wallet384",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode237",
+ "Wallets": [
+ {
+ "Name": "Wallet385",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode287",
+ "Wallets": [
+ {
+ "Name": "Wallet386",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode337",
+ "Wallets": [
+ {
+ "Name": "Wallet387",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode387",
+ "Wallets": [
+ {
+ "Name": "Wallet388",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode437",
+ "Wallets": [
+ {
+ "Name": "Wallet389",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode487",
+ "Wallets": [
+ {
+ "Name": "Wallet390",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN38-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode38",
+ "Wallets": [
+ {
+ "Name": "Wallet391",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode88",
+ "Wallets": [
+ {
+ "Name": "Wallet392",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode138",
+ "Wallets": [
+ {
+ "Name": "Wallet393",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode188",
+ "Wallets": [
+ {
+ "Name": "Wallet394",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode238",
+ "Wallets": [
+ {
+ "Name": "Wallet395",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode288",
+ "Wallets": [
+ {
+ "Name": "Wallet396",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode338",
+ "Wallets": [
+ {
+ "Name": "Wallet397",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode388",
+ "Wallets": [
+ {
+ "Name": "Wallet398",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode438",
+ "Wallets": [
+ {
+ "Name": "Wallet399",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode488",
+ "Wallets": [
+ {
+ "Name": "Wallet400",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN39-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode39",
+ "Wallets": [
+ {
+ "Name": "Wallet401",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode89",
+ "Wallets": [
+ {
+ "Name": "Wallet402",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode139",
+ "Wallets": [
+ {
+ "Name": "Wallet403",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode189",
+ "Wallets": [
+ {
+ "Name": "Wallet404",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode239",
+ "Wallets": [
+ {
+ "Name": "Wallet405",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode289",
+ "Wallets": [
+ {
+ "Name": "Wallet406",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode339",
+ "Wallets": [
+ {
+ "Name": "Wallet407",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode389",
+ "Wallets": [
+ {
+ "Name": "Wallet408",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode439",
+ "Wallets": [
+ {
+ "Name": "Wallet409",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode489",
+ "Wallets": [
+ {
+ "Name": "Wallet410",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN40-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode40",
+ "Wallets": [
+ {
+ "Name": "Wallet411",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode90",
+ "Wallets": [
+ {
+ "Name": "Wallet412",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode140",
+ "Wallets": [
+ {
+ "Name": "Wallet413",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode190",
+ "Wallets": [
+ {
+ "Name": "Wallet414",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode240",
+ "Wallets": [
+ {
+ "Name": "Wallet415",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode290",
+ "Wallets": [
+ {
+ "Name": "Wallet416",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode340",
+ "Wallets": [
+ {
+ "Name": "Wallet417",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode390",
+ "Wallets": [
+ {
+ "Name": "Wallet418",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode440",
+ "Wallets": [
+ {
+ "Name": "Wallet419",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode490",
+ "Wallets": [
+ {
+ "Name": "Wallet420",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN41-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode41",
+ "Wallets": [
+ {
+ "Name": "Wallet421",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode91",
+ "Wallets": [
+ {
+ "Name": "Wallet422",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode141",
+ "Wallets": [
+ {
+ "Name": "Wallet423",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode191",
+ "Wallets": [
+ {
+ "Name": "Wallet424",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode241",
+ "Wallets": [
+ {
+ "Name": "Wallet425",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode291",
+ "Wallets": [
+ {
+ "Name": "Wallet426",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode341",
+ "Wallets": [
+ {
+ "Name": "Wallet427",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode391",
+ "Wallets": [
+ {
+ "Name": "Wallet428",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode441",
+ "Wallets": [
+ {
+ "Name": "Wallet429",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode491",
+ "Wallets": [
+ {
+ "Name": "Wallet430",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN42-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode42",
+ "Wallets": [
+ {
+ "Name": "Wallet431",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode92",
+ "Wallets": [
+ {
+ "Name": "Wallet432",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode142",
+ "Wallets": [
+ {
+ "Name": "Wallet433",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode192",
+ "Wallets": [
+ {
+ "Name": "Wallet434",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode242",
+ "Wallets": [
+ {
+ "Name": "Wallet435",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode292",
+ "Wallets": [
+ {
+ "Name": "Wallet436",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode342",
+ "Wallets": [
+ {
+ "Name": "Wallet437",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode392",
+ "Wallets": [
+ {
+ "Name": "Wallet438",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode442",
+ "Wallets": [
+ {
+ "Name": "Wallet439",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode492",
+ "Wallets": [
+ {
+ "Name": "Wallet440",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN43-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode43",
+ "Wallets": [
+ {
+ "Name": "Wallet441",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode93",
+ "Wallets": [
+ {
+ "Name": "Wallet442",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode143",
+ "Wallets": [
+ {
+ "Name": "Wallet443",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode193",
+ "Wallets": [
+ {
+ "Name": "Wallet444",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode243",
+ "Wallets": [
+ {
+ "Name": "Wallet445",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode293",
+ "Wallets": [
+ {
+ "Name": "Wallet446",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode343",
+ "Wallets": [
+ {
+ "Name": "Wallet447",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode393",
+ "Wallets": [
+ {
+ "Name": "Wallet448",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode443",
+ "Wallets": [
+ {
+ "Name": "Wallet449",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode493",
+ "Wallets": [
+ {
+ "Name": "Wallet450",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN44-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode44",
+ "Wallets": [
+ {
+ "Name": "Wallet451",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode94",
+ "Wallets": [
+ {
+ "Name": "Wallet452",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode144",
+ "Wallets": [
+ {
+ "Name": "Wallet453",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode194",
+ "Wallets": [
+ {
+ "Name": "Wallet454",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode244",
+ "Wallets": [
+ {
+ "Name": "Wallet455",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode294",
+ "Wallets": [
+ {
+ "Name": "Wallet456",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode344",
+ "Wallets": [
+ {
+ "Name": "Wallet457",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode394",
+ "Wallets": [
+ {
+ "Name": "Wallet458",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode444",
+ "Wallets": [
+ {
+ "Name": "Wallet459",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode494",
+ "Wallets": [
+ {
+ "Name": "Wallet460",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN45-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode45",
+ "Wallets": [
+ {
+ "Name": "Wallet461",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode95",
+ "Wallets": [
+ {
+ "Name": "Wallet462",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode145",
+ "Wallets": [
+ {
+ "Name": "Wallet463",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode195",
+ "Wallets": [
+ {
+ "Name": "Wallet464",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode245",
+ "Wallets": [
+ {
+ "Name": "Wallet465",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode295",
+ "Wallets": [
+ {
+ "Name": "Wallet466",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode345",
+ "Wallets": [
+ {
+ "Name": "Wallet467",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode395",
+ "Wallets": [
+ {
+ "Name": "Wallet468",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode445",
+ "Wallets": [
+ {
+ "Name": "Wallet469",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode495",
+ "Wallets": [
+ {
+ "Name": "Wallet470",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN46-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode46",
+ "Wallets": [
+ {
+ "Name": "Wallet471",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode96",
+ "Wallets": [
+ {
+ "Name": "Wallet472",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode146",
+ "Wallets": [
+ {
+ "Name": "Wallet473",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode196",
+ "Wallets": [
+ {
+ "Name": "Wallet474",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode246",
+ "Wallets": [
+ {
+ "Name": "Wallet475",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode296",
+ "Wallets": [
+ {
+ "Name": "Wallet476",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode346",
+ "Wallets": [
+ {
+ "Name": "Wallet477",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode396",
+ "Wallets": [
+ {
+ "Name": "Wallet478",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode446",
+ "Wallets": [
+ {
+ "Name": "Wallet479",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode496",
+ "Wallets": [
+ {
+ "Name": "Wallet480",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN47-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode47",
+ "Wallets": [
+ {
+ "Name": "Wallet481",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode97",
+ "Wallets": [
+ {
+ "Name": "Wallet482",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode147",
+ "Wallets": [
+ {
+ "Name": "Wallet483",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode197",
+ "Wallets": [
+ {
+ "Name": "Wallet484",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode247",
+ "Wallets": [
+ {
+ "Name": "Wallet485",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode297",
+ "Wallets": [
+ {
+ "Name": "Wallet486",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode347",
+ "Wallets": [
+ {
+ "Name": "Wallet487",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode397",
+ "Wallets": [
+ {
+ "Name": "Wallet488",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode447",
+ "Wallets": [
+ {
+ "Name": "Wallet489",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode497",
+ "Wallets": [
+ {
+ "Name": "Wallet490",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN48-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode48",
+ "Wallets": [
+ {
+ "Name": "Wallet491",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode98",
+ "Wallets": [
+ {
+ "Name": "Wallet492",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode148",
+ "Wallets": [
+ {
+ "Name": "Wallet493",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode198",
+ "Wallets": [
+ {
+ "Name": "Wallet494",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode248",
+ "Wallets": [
+ {
+ "Name": "Wallet495",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode298",
+ "Wallets": [
+ {
+ "Name": "Wallet496",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode348",
+ "Wallets": [
+ {
+ "Name": "Wallet497",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode398",
+ "Wallets": [
+ {
+ "Name": "Wallet498",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode448",
+ "Wallets": [
+ {
+ "Name": "Wallet499",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode498",
+ "Wallets": [
+ {
+ "Name": "Wallet500",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN49-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode49",
+ "Wallets": [
+ {
+ "Name": "Wallet501",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode99",
+ "Wallets": [
+ {
+ "Name": "Wallet502",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode149",
+ "Wallets": [
+ {
+ "Name": "Wallet503",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode199",
+ "Wallets": [
+ {
+ "Name": "Wallet504",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode249",
+ "Wallets": [
+ {
+ "Name": "Wallet505",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode299",
+ "Wallets": [
+ {
+ "Name": "Wallet506",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode349",
+ "Wallets": [
+ {
+ "Name": "Wallet507",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode399",
+ "Wallets": [
+ {
+ "Name": "Wallet508",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode449",
+ "Wallets": [
+ {
+ "Name": "Wallet509",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode499",
+ "Wallets": [
+ {
+ "Name": "Wallet510",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN50-betanet-model-2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode50",
+ "Wallets": [
+ {
+ "Name": "Wallet511",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode100",
+ "Wallets": [
+ {
+ "Name": "Wallet512",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode150",
+ "Wallets": [
+ {
+ "Name": "Wallet513",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode200",
+ "Wallets": [
+ {
+ "Name": "Wallet514",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode250",
+ "Wallets": [
+ {
+ "Name": "Wallet515",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode300",
+ "Wallets": [
+ {
+ "Name": "Wallet516",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode350",
+ "Wallets": [
+ {
+ "Name": "Wallet517",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode400",
+ "Wallets": [
+ {
+ "Name": "Wallet518",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode450",
+ "Wallets": [
+ {
+ "Name": "Wallet519",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ },
+ {
+ "Name": "nonParticipatingNode500",
+ "Wallets": [
+ {
+ "Name": "Wallet520",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/node.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/node.json
new file mode 100644
index 000000000..d3b429ee3
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/node.json
@@ -0,0 +1,10 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/nonPartNode.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/nonPartNode.json
new file mode 100644
index 000000000..5b0a52d9d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/recipe.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/relay.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
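
A note on the templates above: ConfigJSONOverride is itself a JSON document carried as an escaped string inside the outer JSON, so any consumer has to decode it in two passes. The sketch below illustrates that two-step decode; NodeTemplate and its fields are an illustrative subset of the shapes in these files, not go-algorand's actual deployment types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// NodeTemplate is an illustrative subset of the relay.json shape above;
// it is not the actual go-algorand deployment type.
type NodeTemplate struct {
	EnableTelemetry    bool   `json:"EnableTelemetry"`
	ConfigJSONOverride string `json:"ConfigJSONOverride"`
}

func main() {
	raw := []byte(`{
		"EnableTelemetry": true,
		"ConfigJSONOverride": "{ \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }"
	}`)

	// Pass 1: decode the template itself; the override stays a string.
	var tmpl NodeTemplate
	if err := json.Unmarshal(raw, &tmpl); err != nil {
		panic(err)
	}

	// Pass 2: decode the escaped override into its own key/value map.
	override := map[string]interface{}{}
	if err := json.Unmarshal([]byte(tmpl.ConfigJSONOverride), &override); err != nil {
		panic(err)
	}
	fmt.Println(override["BaseLoggerDebugLevel"]) // 4
}
```

Keeping the override as an opaque string lets the deploy tooling pass node-local config through to algod without the template schema having to track every config key.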
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/topology.json b/test/testdata/deployednettemplates/recipes/betanet-model-2/topology.json
new file mode 100644
index 000000000..35d31e5d8
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/topology.json
@@ -0,0 +1,304 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-betanet-model-2",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "R2-betanet-model-2",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "R3-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "R4-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "R5-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N1-betanet-model-2",
+ "Template": "AWS-US-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N2-betanet-model-2",
+ "Template": "AWS-US-WEST-1-c5d.2xl"
+ },
+ {
+ "Name": "N3-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N4-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
+ },
+ {
+ "Name": "N5-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N6-betanet-model-2",
+ "Template": "AWS-US-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N7-betanet-model-2",
+ "Template": "AWS-US-WEST-1-c5d.2xl"
+ },
+ {
+ "Name": "N8-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N9-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
+ },
+ {
+ "Name": "N10-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N11-betanet-model-2",
+ "Template": "AWS-US-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N12-betanet-model-2",
+ "Template": "AWS-US-WEST-1-c5d.2xl"
+ },
+ {
+ "Name": "N13-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N14-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
+ },
+ {
+ "Name": "N15-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N16-betanet-model-2",
+ "Template": "AWS-US-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N17-betanet-model-2",
+ "Template": "AWS-US-WEST-1-c5d.2xl"
+ },
+ {
+ "Name": "N18-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
+ },
+ {
+ "Name": "N19-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
+ },
+ {
+ "Name": "N20-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
+ },
+ {
+ "Name": "NPN1-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN2-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN3-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN4-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN5-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN6-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN7-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN8-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN9-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN10-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN11-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN12-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN13-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN14-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN15-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN16-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN17-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN18-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN19-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN20-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN21-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN22-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN23-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN24-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN25-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN26-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN27-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN28-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN29-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN30-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN31-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN32-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN33-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN34-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN35-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN36-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN37-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN38-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN39-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN40-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN41-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN42-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN43-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN44-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN45-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ },
+ {
+ "Name": "NPN46-betanet-model-2",
+ "Template": "AWS-US-EAST-1-Small"
+ },
+ {
+ "Name": "NPN47-betanet-model-2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN48-betanet-model-2",
+ "Template": "AWS-SA-EAST-1-Small"
+ },
+ {
+ "Name": "NPN49-betanet-model-2",
+ "Template": "AWS-EU-NORTH-1-Small"
+ },
+ {
+ "Name": "NPN50-betanet-model-2",
+ "Template": "AWS-AP-SOUTHEAST-1-Small"
+ }
+ ]
+}
diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go
index e3ebba922..f61944ff1 100644
--- a/tools/debug/algodump/main.go
+++ b/tools/debug/algodump/main.go
@@ -99,7 +99,7 @@ func (dh *dumpHandler) Handle(msg network.IncomingMessage) network.OutgoingMessa
data = fmt.Sprintf("proposal %s", shortdigest(crypto.Digest(p.Block.Hash())))
case protocol.TxnTag:
- dec := protocol.NewDecoderBytes(msg.Data)
+ dec := protocol.NewMsgpDecoderBytes(msg.Data)
for {
var stx transactions.SignedTxn
err := dec.Decode(&stx)
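
The algodump change above swaps protocol.NewDecoderBytes for protocol.NewMsgpDecoderBytes: a TxnTag message carries several msgpack-encoded SignedTxn values packed back to back, so the handler decodes in a loop until the stream is exhausted. Below is a rough stand-alone analogue of that decode-until-EOF loop; encoding/json's stream decoder stands in for the internal msgp decoder, and Txn is a hypothetical placeholder for transactions.SignedTxn.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// Txn is a hypothetical stand-in for transactions.SignedTxn.
type Txn struct {
	Sender string `json:"snd"`
	Amount uint64 `json:"amt"`
}

func main() {
	// Two concatenated values, as a TxnTag payload concatenates SignedTxns.
	payload := []byte(`{"snd":"A","amt":1}{"snd":"B","amt":2}`)
	dec := json.NewDecoder(bytes.NewReader(payload))
	for {
		var stx Txn
		if err := dec.Decode(&stx); err != nil {
			if err != io.EOF {
				fmt.Println("decode error:", err)
			}
			break // end of stream, mirroring the loop in the hunk above
		}
		fmt.Printf("txn: %+v\n", stx)
	}
}
```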
diff --git a/tools/debug/doberman/logo.go b/tools/debug/doberman/logo.go
index 163b73951..a27411074 100644
--- a/tools/debug/doberman/logo.go
+++ b/tools/debug/doberman/logo.go
@@ -16,7 +16,7 @@
package main
-// data, err := ioutil.ReadFile("algorand-logo.png")
+// data, err := os.ReadFile("algorand-logo.png")
// fmt.Printf("%#v\n", data)
var logo = []byte{0x89, 0x50, 0x4e, 0x47, 0xd, 0xa, 0x1a, 0xa, 0x0, 0x0, 0x0, 0xd, 0x49, 0x48, 0x44, 0x52, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0xf0, 0x8, 0x2, 0x0, 0x0, 0x0, 0xb1, 0x37, 0x7e, 0xc5, 0x0, 0x0, 0xf, 0xa1, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0xec, 0x9d, 0x7b, 0x6c, 0x14, 0xd5, 0x17, 0xc7, 0x67, 0xbb, 0xdb, 0xd7, 0x6e, 0x2d, 0x65, 0xfb, 0xb2, 0x80, 0xf, 0x5a, 0xa0, 0x22, 0x18, 0x68, 0x8, 0xa2, 0x88, 0x1a, 0x63, 0x85, 0x50, 0xd2, 0x8a, 0x50, 0x81, 0xaa, 0x4, 0x62, 0xf0, 0x5, 0x46, 0x57, 0x14, 0x9, 0xad, 0x9, 0x1a, 0x6c, 0xa2, 0xc5, 0x12, 0xf8, 0x3, 0xf1, 0x81, 0x5, 0x6b, 0x45, 0x9a, 0xa, 0x85, 0x95, 0x50, 0x8b, 0x3c, 0x82, 0xba, 0x46, 0xd0, 0x3e, 0x10, 0x69, 0x30, 0xb5, 0x58, 0x42, 0x8b, 0x85, 0x6e, 0xb, 0xdd, 0xdd, 0xb6, 0xfb, 0x9c, 0xfd, 0xe5, 0x97, 0x4d, 0xaa, 0x11, 0x28, 0xdd, 0x73, 0x66, 0xf6, 0x31, 0xfd, 0x7e, 0xfe, 0x2, 0xc2, 0x9d, 0x33, 0x7b, 0xe7, 0xb3, 0x67, 0xcf, 0xdc, 0xb9, 0x73, 0xaf, 0xc6, 0xeb, 0xf5, 0xa, 0x0, 0x28, 0x85, 0x8, 0x74, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x40, 0xf9, 0x68, 0xd0, 0x5, 0x41, 0xa7, 0xa9, 0xa9, 0x69, 0xf7, 0xee, 0xdd, 0x11, 0x11, 0xfe, 0x25, 0x17, 0x9d, 0x4e, 0xb7, 0x66, 0xcd, 0x1a, 0xf4, 0xde, 0x7f, 0x50, 0x79, 0xbd, 0x5e, 0xf4, 0x42, 0x70, 0x99, 0x35, 0x6b, 0x96, 0xc9, 0x64, 0x22, 0x34, 0xb4, 0xdb, 0xed, 0xd1, 0xd1, 0xd1, 0xe8, 0x40, 0x94, 0x1c, 0x21, 0xc4, 0xf1, 0xe3, 0xc7, 0x69, 0x36, 0xb, 0x82, 0x70, 0xfa, 0xf4, 0x69, 0x74, 0x20, 0x84, 0xe, 0x21, 0xdc, 0x6e, 0x77, 0x5e, 0x5e, 0x1e, 0xb9, 0xf9, 0xd7, 0x5f, 0x7f, 0x8d, 0x3e, 0x84, 0xd0, 0x21, 0xc4, 0x87, 0x1f, 0x7e, 0x68, 0xb1, 0x58, 0xc8, 0xcd, 0xcb, 0xca, 0xca, 0xd0, 0x87, 0xa8, 0xa1, 0x43, 0x28, 0x3d, 0x47, 0x46, 0x46, 0x32, 0xf, 0x62, 0xb5, 0x5a, 0xe3, 0xe2, 0xe2, 0xd0, 0x99, 0xc8, 0xd0, 0xc1, 0x67, 0xf3, 0xe6, 0xcd, 0xfc, 0x83, 0xb4, 0xb5, 0xb5, 0xa1, 0x27, 0x21, 0x74, 0xf0, 0x69, 0x68, 0x68, 0x90, 0x64, 0xd0, 0xed, 0xb3, 0xcf, 0x3e, 0x43, 0x67, 0xa2, 0xe4, 0x8, 0x3e, 0x23, 0x47, 0x8e, 0xbc, 0x7a, 0xf5, 0x2a, 0xff, 0x38, 0x71, 0x71, 0x71, 0x16, 0x8b, 0x45, 0xa5, 0x52, 0xa1, 0x4b, 0x91, 0xa1, 0x83, 0xc6, 0xfb, 0xef, 0xbf, 0x2f, 0x89, 0xcd, 0x82, 0x20, 0xd8, 0x6c, 0xb6, 0xfa, 0xfa, 0x7a, 0x74, 0x29, 0x32, 0x74, 0xd0, 0xe8, 0xeb, 0xeb, 0x4b, 0x48, 0x48, 0x70, 0xb9, 0x5c, 0x52, 0x1d, 0x30, 0x37, 0x37, 0xd7, 0x68, 0x34, 0xa2, 0x63, 0x91, 0xa1, 0x83, 0xc3, 0xb, 0x2f, 0xbc, 0x20, 0xa1, 0xcd, 0x82, 0x20, 0xd4, 0xd4, 0xd4, 0xa0, 0x57, 0x91, 0xa1, 0x83, 0x43, 0x6f, 0x6f, 0xaf, 0x1c, 0xa3, 0x6c, 0xb8, 0x88, 0xc8, 0xd0, 0xc1, 0xe1, 0xe5, 0x97, 0x5f, 0x96, 0xe3, 0xb0, 0x3f, 0xfc, 0xf0, 0x3, 0xfa, 0x16, 0x19, 0x3a, 0xd0, 0xb4, 0xb6, 0xb6, 0x8e, 0x1d, 0x3b, 0x56, 0x8e, 0x23, 0xcf, 0x9e, 0x3d, 0xbb, 0xb6, 0xb6, 0x16, 0x3d, 0xc, 0xa1, 0x3, 0x87, 0xcb, 0xe5, 0xba, 0xeb, 
0xae, 0xbb, 0xce, 0x9d, 0x3b, 0x27, 0xc7, 0xc1, 0xe3, 0xe3, 0xe3, 0x7b, 0x7a, 0x7a, 0xd0, 0xc9, 0x28, 0x39, 0x2, 0xc7, 0xd2, 0xa5, 0x4b, 0x65, 0xb2, 0x59, 0x10, 0x4, 0x8b, 0xc5, 0x2, 0xa1, 0x21, 0x74, 0xe0, 0xe8, 0xea, 0xea, 0xaa, 0xac, 0xac, 0x94, 0x35, 0x44, 0x45, 0x45, 0x5, 0xfa, 0x19, 0x42, 0x7, 0x88, 0xe5, 0xcb, 0x97, 0xcb, 0x1d, 0xe2, 0xf0, 0xe1, 0xc3, 0xe8, 0x67, 0xd4, 0xd0, 0x81, 0xe0, 0xe2, 0xc5, 0x8b, 0xa3, 0x47, 0x8f, 0x96, 0x3b, 0x4a, 0x52, 0x52, 0x52, 0x67, 0x67, 0x27, 0x7a, 0x1b, 0x19, 0x5a, 0x76, 0x66, 0xcf, 0x9e, 0x1d, 0x80, 0x28, 0x66, 0xb3, 0xb9, 0xa3, 0xa3, 0x3, 0xbd, 0xd, 0xa1, 0xe5, 0xa5, 0xba, 0xba, 0xfa, 0xcc, 0x99, 0x33, 0x81, 0x89, 0xb5, 0x6f, 0xdf, 0x3e, 0x74, 0x38, 0x4a, 0xe, 0x19, 0x71, 0xbb, 0xdd, 0x49, 0x49, 0x49, 0x1, 0x1b, 0x7f, 0xc8, 0xce, 0xce, 0xfe, 0xee, 0xbb, 0xef, 0x90, 0xa1, 0x81, 0x5c, 0x94, 0x96, 0x96, 0x6, 0x72, 0x34, 0xed, 0xd8, 0xb1, 0x63, 0x1e, 0x8f, 0x7, 0x19, 0x1a, 0x19, 0x5a, 0xae, 0xf4, 0x1c, 0x15, 0x15, 0x15, 0xe0, 0xee, 0x6d, 0x6f, 0x6f, 0x1f, 0x35, 0x6a, 0x14, 0x32, 0x34, 0x90, 0x9e, 0x4d, 0x9b, 0x36, 0x91, 0x6d, 0x26, 0x4f, 0xd8, 0x3f, 0x7a, 0xf4, 0x28, 0x4a, 0xe, 0x20, 0x3d, 0x3f, 0xff, 0xfc, 0xf3, 0xda, 0xb5, 0x6b, 0xc9, 0xcd, 0xd, 0x6, 0x43, 0x52, 0x52, 0x12, 0xa1, 0xe1, 0xf6, 0xed, 0xdb, 0x51, 0x72, 0xa0, 0xe4, 0x90, 0x9e, 0x98, 0x98, 0x18, 0x87, 0xc3, 0x41, 0x6b, 0xab, 0xd5, 0x6a, 0x7b, 0x7b, 0x7b, 0x73, 0x73, 0x73, 0xf, 0x1c, 0x38, 0x40, 0x68, 0xee, 0x74, 0x3a, 0xf9, 0x2f, 0x93, 0x23, 0x43, 0x83, 0x7f, 0x78, 0xeb, 0xad, 0xb7, 0xc8, 0x36, 0xfb, 0x6a, 0x15, 0xce, 0xc3, 0xc5, 0x8b, 0x17, 0x2f, 0x22, 0x43, 0x3, 0xc9, 0x70, 0xb9, 0x5c, 0x5a, 0xad, 0xd6, 0xed, 0x76, 0xd3, 0x9a, 0xa7, 0xa4, 0xa4, 0x5c, 0xba, 0x74, 0xc9, 0xeb, 0xf5, 0x8a, 0xa2, 0x18, 0x15, 0x15, 0x25, 0x8a, 0xa2, 0xbf, 0x47, 0x28, 0x2d, 0x2d, 0x5d, 0xbd, 0x7a, 0xf5, 0xb0, 0xed, 0x7f, 0xac, 0x3e, 0x2a, 0x31, 0x45, 0x45, 0x45, 0x64, 0x9b, 0x5, 0x41, 0x28, 0x29, 0x29, 0xf1, 0xdd, 0x14, 0xaa, 0xd5, 0x6a, 0x9d, 0x4e, 0x67, 0xb5, 0x5a, 0xfd, 0x3d, 0xc2, 0xd6, 0xad, 0x5b, 0xf5, 0x7a, 0xbd, 0xbf, 0xad, 0xbc, 0x5e, 0x6f, 0x6a, 0x6a, 0x6a, 0x4e, 0x4e, 0xe, 0x32, 0x34, 0xf8, 0x7, 0x9b, 0xcd, 0x16, 0x1f, 0x1f, 0x4f, 0xee, 0x52, 0xbd, 0x5e, 0xdf, 0xd5, 0xd5, 0x35, 0xf0, 0xd7, 0xec, 0xec, 0xec, 0x23, 0x47, 0x8e, 0x4, 0xec, 0xe4, 0xcd, 0x66, 0xb3, 0x5e, 0xaf, 0xf, 0xf7, 0x15, 0x11, 0x50, 0x43, 0x4b, 0xc9, 0xd2, 0xa5, 0x4b, 0x39, 0x9, 0xa2, 0xae, 0xae, 0x6e, 0xa0, 0xb9, 0xd7, 0xeb, 0xd, 0xe4, 0xf2, 0xcf, 0x35, 0x35, 0x35, 0x89, 0x89, 0x89, 0x4a, 0x58, 0xdf, 0xc3, 0xb, 0x24, 0xa2, 0xb1, 0xb1, 0x91, 0x73, 0x21, 0xbe, 0xf8, 0xe2, 0x8b, 0x6b, 0x8f, 0x19, 0x98, 0xe5, 0x9f, 0xc7, 0x8f, 0x1f, 0x2f, 0x8a, 0xa2, 0x32, 0xae, 0x2, 0x32, 0xb4, 0x34, 0x88, 0xa2, 0xf8, 0xf8, 0xe3, 0x8f, 0x93, 0x9b, 0xa7, 0xa5, 0xa5, 0x3d, 0xfd, 0xf4, 0xd3, 0xd7, 0xfe, 0xfb, 0x13, 0x4f, 0x3c, 0x11, 0x80, 0x93, 0xdf, 0xbf, 0x7f, 0xbf, 0x62, 0xd6, 0x5e, 0x82, 0xd0, 0xd2, 0x70, 0xe0, 0xc0, 0x81, 0xf3, 0xe7, 0xcf, 0x93, 0x9b, 0x1b, 0x8d, 0xc6, 0xeb, 0x2a, 0x55, 0x58, 0x58, 0x28, 0xf7, 0x99, 0xe7, 0xe7, 0xe7, 0x4f, 0x9c, 0x38, 0x51, 0x31, 0x17, 0x2, 0x37, 0x85, 0xd2, 0xa4, 0x67, 0xbd, 0x5e, 0x4f, 0x9e, 0x87, 0x34, 0x66, 0xcc, 0x98, 0xb, 0x17, 0x2e, 0xdc, 0xa8, 0x20, 0xf4, 0x77, 0xef, 0x15, 0x7f, 0x69, 0x6a, 0x6a, 0x52, 0x92, 0xd0, 0xc8, 0xd0, 0x12, 0x30, 0x77, 0xee, 0x5c, 0xce, 0xac, 0xba, 0x41, 0x16, 0xe2, 0x57, 0xa9, 0x54, 0xb2, 0x96, 0xd1, 0xf3, 0xe6, 0xcd, 0x53, 0x92, 0xcd, 0xb8, 0x29, 0x94, 0x80, 0xd6, 0xd6, 0x56, 0x4e, 0xff, 0x3f, 0xf2, 0xc8, 0x23, 0x83, 0x1f, 0x7f, 0xe5, 0xca, 0x95, 0x32, 0x5d, 0xfa, 0xc8, 0xc8, 0x48, 
0xb7, 0xdb, 0xad, 0x98, 0xdb, 0x41, 0xdc, 0x14, 0x4a, 0xc3, 0x83, 0xf, 0x3e, 0x48, 0x6e, 0xab, 0x56, 0xab, 0x8d, 0x46, 0xe3, 0xe0, 0x55, 0xdf, 0x4b, 0x2f, 0xbd, 0x24, 0xd3, 0x99, 0xaf, 0x5f, 0xbf, 0x5e, 0xad, 0x56, 0x2b, 0x6c, 0x29, 0x5e, 0x3c, 0x29, 0x64, 0xb1, 0x73, 0xe7, 0xce, 0x1b, 0x95, 0xbf, 0x43, 0xe1, 0xd5, 0x57, 0x5f, 0xbd, 0xe9, 0x52, 0x77, 0xe3, 0xc6, 0x8d, 0x93, 0xe3, 0xcc, 0xb5, 0x5a, 0x6d, 0x51, 0x51, 0x91, 0xf2, 0xae, 0x8, 0x6e, 0xa, 0x59, 0xdc, 0x72, 0xcb, 0x2d, 0x36, 0x9b, 0x8d, 0x98, 0x4b, 0x34, 0x1a, 0x97, 0xcb, 0xe5, 0xf5, 0x7a, 0x6f, 0x9a, 0x23, 0x27, 0x4c, 0x98, 0xd0, 0xdc, 0xdc, 0x2c, 0xed, 0x99, 0xd7, 0xd5, 0xd5, 0x65, 0x65, 0x65, 0x29, 0x6f, 0xa5, 0x74, 0x94, 0x1c, 0x74, 0x8a, 0x8a, 0x8a, 0xc8, 0x36, 0xfb, 0x86, 0xea, 0x86, 0x38, 0x97, 0xff, 0xb9, 0xe7, 0x9e, 0x93, 0xf6, 0xcc, 0x8b, 0x8b, 0x8b, 0x15, 0x69, 0x33, 0x32, 0x34, 0x1d, 0x97, 0xcb, 0xa5, 0xd3, 0xe9, 0xc8, 0x2b, 0x3d, 0x4f, 0x9f, 0x3e, 0xfd, 0xe4, 0xc9, 0x93, 0x43, 0xfc, 0xcf, 0x27, 0x4e, 0x9c, 0xb8, 0xef, 0xbe, 0xfb, 0x24, 0x3c, 0x79, 0x9b, 0xcd, 0xa6, 0xd3, 0xe9, 0x14, 0x79, 0x5d, 0x90, 0xa1, 0xe9, 0x77, 0x54, 0x9c, 0x75, 0xcb, 0xfd, 0x5a, 0x2c, 0x74, 0xca, 0x94, 0x29, 0x12, 0x9e, 0xf9, 0x7, 0x1f, 0x7c, 0xa0, 0x54, 0x9b, 0x5, 0xc, 0xdb, 0xd1, 0xb8, 0x74, 0xe9, 0x12, 0xa7, 0xcf, 0xb, 0xb, 0xb, 0xfd, 0x8d, 0x38, 0x75, 0xea, 0x54, 0x49, 0x2e, 0x77, 0x62, 0x62, 0xa2, 0xc7, 0xe3, 0x51, 0xd8, 0x50, 0x1d, 0x86, 0xed, 0xb8, 0x29, 0x20, 0x37, 0x37, 0x97, 0xdc, 0x5c, 0xa7, 0xd3, 0xbd, 0xf3, 0xce, 0x3b, 0xfe, 0x56, 0x7a, 0xb, 0x17, 0x2e, 0x94, 0xe4, 0xe4, 0xcb, 0xcb, 0xcb, 0x23, 0x22, 0x22, 0x14, 0xbc, 0x6b, 0x16, 0x84, 0xf6, 0x9b, 0x37, 0xdf, 0x7c, 0x73, 0xe8, 0xe5, 0xef, 0xb5, 0x6c, 0xdf, 0xbe, 0x5d, 0xa3, 0xd1, 0xf8, 0xa5, 0x94, 0xd7, 0xeb, 0xcd, 0xcf, 0xcf, 0xe7, 0x9f, 0xf9, 0x94, 0x29, 0x53, 0x72, 0x72, 0x72, 0x94, 0x7d, 0xd7, 0x84, 0x9b, 0x42, 0xff, 0xbb, 0x8c, 0x91, 0xde, 0xc6, 0x8e, 0x1d, 0x4b, 0x5e, 0x25, 0x5a, 0xab, 0xd5, 0xf6, 0xf7, 0xf7, 0x73, 0xce, 0xbc, 0xad, 0xad, 0x2d, 0x0, 0xcb, 0x46, 0x22, 0x43, 0x87, 0x13, 0xcc, 0x4d, 0x52, 0x36, 0x6c, 0xd8, 0x40, 0x6e, 0x9b, 0x91, 0x91, 0xc1, 0x9, 0x9d, 0x99, 0x99, 0xa9, 0x78, 0x9b, 0x91, 0xa1, 0xfd, 0xa3, 0xa7, 0xa7, 0x47, 0xaf, 0xd7, 0x13, 0x5e, 0x5c, 0xe5, 0xa7, 0x67, 0xdf, 0x48, 0xdf, 0xaf, 0xbf, 0xfe, 0x4a, 0x6e, 0x7e, 0xe5, 0xca, 0x95, 0x11, 0x23, 0x46, 0x28, 0x7e, 0xcf, 0x59, 0x64, 0x68, 0x3f, 0xc8, 0xcd, 0xcd, 0x25, 0xdb, 0xac, 0xd1, 0x68, 0x8e, 0x1d, 0x3b, 0x46, 0x4e, 0x1f, 0x7, 0xf, 0x1e, 0xe4, 0xd8, 0xbc, 0x7c, 0xf9, 0xf2, 0x84, 0x84, 0x84, 0xe1, 0xb0, 0x83, 0x32, 0x32, 0xf4, 0x50, 0xf9, 0xf1, 0xc7, 0x1f, 0x39, 0xf3, 0x90, 0xf6, 0xec, 0xd9, 0xb3, 0x60, 0xc1, 0x2, 0x5a, 0x5b, 0x51, 0x14, 0x93, 0x93, 0x93, 0xbb, 0xbb, 0xbb, 0x69, 0xcd, 0x93, 0x93, 0x93, 0x3b, 0x3a, 0x3a, 0xe4, 0x9e, 0x57, 0xd, 0xa1, 0xc3, 0x8c, 0x31, 0x63, 0xc6, 0xb4, 0xb7, 0xb7, 0xd3, 0xda, 0xe, 0xac, 0xb6, 0x41, 0xcb, 0x91, 0x5b, 0xb6, 0x6c, 0x31, 0x18, 0xc, 0x9c, 0x93, 0xb7, 0xdb, 0xed, 0x81, 0x79, 0x3d, 0x11, 0x25, 0x47, 0x78, 0x50, 0x59, 0x59, 0x49, 0xb6, 0x59, 0x10, 0x4, 0x93, 0xc9, 0x44, 0x1e, 0x1e, 0xe9, 0xeb, 0xeb, 0x63, 0xda, 0xec, 0xab, 0x58, 0x86, 0xc9, 0x95, 0x82, 0xd0, 0x43, 0xfa, 0xc5, 0xe7, 0x4c, 0x4a, 0xbe, 0xfd, 0xf6, 0xdb, 0x39, 0x3, 0x14, 0x65, 0x65, 0x65, 0xfc, 0x8f, 0xf0, 0xd1, 0x47, 0x1f, 0xd, 0x93, 0x8b, 0x85, 0x92, 0xe3, 0xe6, 0x94, 0x97, 0x97, 0x2f, 0x5b, 0xb6, 0x8c, 0xdc, 0x9c, 0xf3, 0xd2, 0x5e, 0x7f, 0x7f, 0xbf, 0x56, 0xab, 0x95, 0x20, 0x6f, 0x45, 0x44, 0xc, 0x93, 0xb5, 0xd0, 0x91, 0xa1, 0x6f, 0x82, 0xcb, 0xe5, 0xe2, 0xd8, 0xbc, 0x70, 0xe1, 0x42, 0xce, 0x4b, 
0x7b, 0x9c, 0x67, 0xec, 0xff, 0xf9, 0x91, 0x61, 0x3e, 0x94, 0x81, 0xd0, 0xa, 0xa1, 0xa0, 0xa0, 0x80, 0xdc, 0x56, 0xa3, 0xd1, 0xec, 0xd8, 0xb1, 0x83, 0xfc, 0x1b, 0xb8, 0x61, 0xc3, 0x6, 0x9, 0x97, 0x2, 0xdb, 0xb1, 0x63, 0xc7, 0xb0, 0xb8, 0x60, 0x98, 0x3a, 0x37, 0x8, 0x9f, 0x7c, 0xf2, 0x9, 0xa7, 0x6f, 0x8b, 0x8a, 0x8a, 0xc8, 0xa1, 0x45, 0x51, 0x94, 0x76, 0x99, 0xe7, 0x49, 0x93, 0x26, 0xd, 0x87, 0x4b, 0x86, 0x1a, 0x7a, 0x30, 0x62, 0x63, 0x63, 0xed, 0x76, 0x3b, 0xad, 0x6d, 0x5c, 0x5c, 0x1c, 0x61, 0xed, 0xd0, 0x1, 0xf2, 0xf3, 0xf3, 0xf7, 0xec, 0xd9, 0x23, 0xe1, 0x67, 0x89, 0x89, 0x89, 0x19, 0xe, 0x55, 0x7, 0x4a, 0x8e, 0x1b, 0xb2, 0x75, 0xeb, 0x56, 0xb2, 0xcd, 0xbe, 0x17, 0x60, 0xc9, 0x6d, 0xfb, 0xfb, 0xfb, 0x25, 0xdf, 0x74, 0xd0, 0x6e, 0xb7, 0xff, 0x7b, 0x69, 0x53, 0x94, 0x1c, 0xc3, 0xb, 0x87, 0xc3, 0x11, 0x13, 0x13, 0x43, 0xee, 0xd5, 0x59, 0xb3, 0x66, 0x71, 0xa2, 0x2f, 0x5e, 0xbc, 0x58, 0x8e, 0x6b, 0xbd, 0x71, 0xe3, 0x46, 0xc5, 0x5f, 0x38, 0x8, 0x7d, 0x7d, 0x56, 0xac, 0x58, 0xc1, 0x51, 0xc7, 0xe9, 0x74, 0x92, 0xdf, 0xa, 0x91, 0x6f, 0xe7, 0xd9, 0xe9, 0xd3, 0xa7, 0x2b, 0xfe, 0xc2, 0x61, 0x5d, 0x8e, 0xeb, 0xd0, 0xda, 0xda, 0xca, 0xd9, 0x4e, 0xaa, 0xa4, 0xa4, 0x84, 0x7c, 0x3f, 0xe7, 0xf5, 0x7a, 0xf3, 0xf2, 0xf2, 0x64, 0xfa, 0x5c, 0xa7, 0x4f, 0x9f, 0x56, 0xfc, 0xb5, 0xc3, 0x4d, 0xe1, 0x75, 0x98, 0x36, 0x6d, 0x5a, 0x7d, 0x7d, 0x3d, 0xad, 0x6d, 0x54, 0x54, 0x54, 0x6f, 0x6f, 0xaf, 0x46, 0x43, 0xcc, 0x14, 0x87, 0xe, 0x1d, 0x9a, 0x33, 0x67, 0x8e, 0x7c, 0x1f, 0xad, 0xb1, 0xb1, 0x51, 0xda, 0x57, 0x6e, 0x71, 0x53, 0x18, 0xea, 0x5c, 0xbe, 0x7c, 0x99, 0x6c, 0xb3, 0x20, 0x8, 0xef, 0xbd, 0xf7, 0x1e, 0xd9, 0x66, 0x41, 0x10, 0xae, 0xbb, 0x4a, 0xb4, 0x84, 0x7c, 0xff, 0xfd, 0xf7, 0x18, 0xe5, 0x18, 0x5e, 0x3c, 0xf4, 0xd0, 0x43, 0xe4, 0xb6, 0xf1, 0xf1, 0xf1, 0x6, 0x83, 0x81, 0xfc, 0xa3, 0x57, 0x53, 0x53, 0x63, 0x36, 0x9b, 0x65, 0xfd, 0x74, 0x83, 0xac, 0x74, 0x8a, 0x92, 0x43, 0x81, 0x30, 0x27, 0x3d, 0x1b, 0x8d, 0x46, 0xf2, 0xc3, 0xea, 0xf6, 0xf6, 0xf6, 0xdb, 0x6e, 0xbb, 0x4d, 0xee, 0xcb, 0xa1, 0x52, 0xa9, 0x9c, 0x4e, 0x27, 0xe7, 0x37, 0x4, 0x42, 0x87, 0xd, 0xdd, 0xdd, 0xdd, 0x69, 0x69, 0x69, 0x4e, 0xa7, 0x93, 0xd6, 0x3c, 0x2b, 0x2b, 0x8b, 0x53, 0xab, 0xa4, 0xa7, 0xa7, 0xff, 0xf5, 0xd7, 0x5f, 0x1, 0xf8, 0x98, 0xbf, 0xfd, 0xf6, 0xdb, 0x3d, 0xf7, 0xdc, 0x83, 0x92, 0x63, 0x58, 0x14, 0x1b, 0x64, 0x9b, 0x55, 0x2a, 0xd5, 0x4d, 0x17, 0xc6, 0x1d, 0x84, 0xdd, 0xbb, 0x77, 0x7, 0xc6, 0x66, 0x41, 0x10, 0xbe, 0xf9, 0xe6, 0x1b, 0x25, 0x5f, 0x45, 0xc, 0x39, 0xfb, 0x38, 0x7c, 0xf8, 0x30, 0xa7, 0x1b, 0xe7, 0xcf, 0x9f, 0xcf, 0x89, 0x3e, 0x62, 0xc4, 0x88, 0x80, 0x5d, 0xf1, 0x3b, 0xee, 0xb8, 0x3, 0x73, 0x39, 0x94, 0xf, 0x67, 0xa8, 0x4e, 0xad, 0x56, 0x77, 0x75, 0x75, 0x91, 0xa5, 0xac, 0xaa, 0xaa, 0x5a, 0xb4, 0x68, 0x51, 0x20, 0x3f, 0xac, 0xc3, 0xe1, 0x88, 0x8a, 0x8a, 0x42, 0xc9, 0xa1, 0x58, 0xe, 0x1d, 0x3a, 0xc4, 0x29, 0x7f, 0x6b, 0x6b, 0x6b, 0xc9, 0x36, 0x7b, 0x3c, 0x1e, 0xce, 0x7c, 0x6b, 0x1a, 0x7f, 0xff, 0xfd, 0x37, 0x6a, 0x68, 0xc5, 0x22, 0x8a, 0x22, 0x67, 0xf4, 0x37, 0x3d, 0x3d, 0xfd, 0xd1, 0x47, 0x1f, 0x25, 0x37, 0x7f, 0xf7, 0xdd, 0x77, 0xc9, 0x93, 0xe0, 0xc8, 0xab, 0xd4, 0x55, 0x54, 0x54, 0xa0, 0x86, 0x56, 0x2c, 0x9b, 0x36, 0x6d, 0xe2, 0x74, 0x60, 0x5b, 0x5b, 0x1b, 0x39, 0x34, 0x73, 0xd4, 0x79, 0xdd, 0xba, 0x75, 0x69, 0x69, 0x69, 0x84, 0x86, 0x7a, 0xbd, 0x1e, 0x93, 0x93, 0x94, 0x89, 0xc5, 0x62, 0xe1, 0x28, 0xb5, 0x62, 0xc5, 0xa, 0x4e, 0x74, 0xce, 0x98, 0x77, 0x4c, 0x4c, 0x8c, 0xc3, 0xe1, 0x20, 0x4f, 0xa2, 0x3a, 0x7b, 0xf6, 0x2c, 0x84, 0x56, 0x20, 0xf3, 0xe7, 0xcf, 0xe7, 0x8, 0x6d, 0xb5, 0x5a, 0xc9, 0xb3, 0xea, 0xfe, 0xf8, 0xe3, 0xf, 
0x4e, 0xe8, 0xb2, 0xb2, 0x32, 0xaf, 0xd7, 0xdb, 0xd2, 0xd2, 0x42, 0x6b, 0xbe, 0x6c, 0xd9, 0x32, 0x8, 0xad, 0x34, 0xdc, 0x6e, 0x37, 0x47, 0x29, 0xa6, 0x13, 0x9c, 0x49, 0x42, 0xb1, 0xb1, 0xb1, 0x3, 0xc7, 0xa1, 0xcd, 0xdb, 0x4e, 0x4d, 0x4d, 0x55, 0xe4, 0x35, 0x1d, 0xd6, 0x37, 0x85, 0x9c, 0x79, 0x6d, 0x91, 0x91, 0x91, 0x9f, 0x7e, 0xfa, 0x29, 0x79, 0xd0, 0x73, 0xed, 0xda, 0xb5, 0xa7, 0x4e, 0x9d, 0x22, 0x47, 0xdf, 0xb2, 0x65, 0xcb, 0xc0, 0x9f, 0x9f, 0x7c, 0xf2, 0x49, 0xc2, 0x11, 0x98, 0x9b, 0x10, 0xe0, 0xa6, 0x30, 0xe4, 0xd8, 0xb8, 0x71, 0x23, 0xa7, 0xdf, 0x4a, 0x4a, 0x4a, 0xc8, 0xa1, 0x6d, 0x36, 0x1b, 0x67, 0xa5, 0x39, 0xdf, 0x94, 0xf, 0x1f, 0xa2, 0x28, 0x92, 0x4b, 0x97, 0xba, 0xba, 0x3a, 0x94, 0x1c, 0xa, 0xc1, 0xe1, 0x70, 0x70, 0x26, 0xe8, 0xa4, 0xa4, 0xa4, 0x70, 0xa2, 0x73, 0x86, 0xf9, 0x4, 0x41, 0x68, 0x6e, 0x6e, 0xfe, 0x4f, 0xe1, 0x4e, 0x3b, 0xce, 0x82, 0x5, 0xb, 0x20, 0x34, 0xee, 0x5, 0xff, 0x4f, 0x7d, 0x7d, 0x3d, 0xf9, 0x5e, 0xf0, 0xc4, 0x89, 0x13, 0x9c, 0xd0, 0x8f, 0x3d, 0xf6, 0xd8, 0xb5, 0xc7, 0xbc, 0xf3, 0xce, 0x3b, 0x31, 0x78, 0x37, 0x7c, 0x85, 0x66, 0xe, 0x2f, 0x94, 0x96, 0x96, 0x72, 0xa2, 0xd3, 0xe4, 0xf3, 0x91, 0x90, 0x90, 0xe0, 0x70, 0x38, 0xae, 0xfd, 0x2e, 0xd1, 0x96, 0xae, 0x53, 0xa9, 0x54, 0x76, 0xbb, 0x1d, 0x42, 0x87, 0x3d, 0x4b, 0x96, 0x2c, 0xe1, 0x8, 0xed, 0x76, 0xbb, 0xc9, 0xa1, 0x1b, 0x1b, 0x1b, 0x39, 0xa1, 0xab, 0xab, 0xab, 0xa5, 0xfd, 0x8a, 0xfa, 0xd6, 0x60, 0x87, 0xd0, 0x61, 0xcc, 0xd9, 0xb3, 0x67, 0x39, 0x4a, 0x7d, 0xfc, 0xf1, 0xc7, 0xe4, 0xd0, 0x1e, 0x8f, 0x87, 0xf6, 0x60, 0x6f, 0x60, 0xa0, 0xed, 0x46, 0x75, 0xe, 0x79, 0xfd, 0x90, 0x82, 0x82, 0x2, 0x8, 0x1d, 0xc6, 0x88, 0xa2, 0xc8, 0x59, 0x3a, 0x31, 0x21, 0x21, 0xc1, 0xe3, 0xf1, 0x90, 0xa3, 0x57, 0x56, 0x56, 0x72, 0xbe, 0x4b, 0xa7, 0x4e, 0x9d, 0x1a, 0xe4, 0xe0, 0x93, 0x27, 0x4f, 0x26, 0x1c, 0x73, 0xd4, 0xa8, 0x51, 0x10, 0x3a, 0x8c, 0xa9, 0xa9, 0xa9, 0xe1, 0x28, 0x65, 0x32, 0x99, 0xc8, 0xa1, 0xc9, 0x8f, 0xf4, 0x7c, 0xe4, 0xe6, 0xe6, 0xe, 0x7e, 0xfc, 0x6d, 0xdb, 0xb6, 0xd1, 0x8e, 0xdc, 0xd5, 0xd5, 0x5, 0xa1, 0xc3, 0x35, 0x3d, 0x73, 0x76, 0xcd, 0x79, 0xf8, 0xe1, 0x87, 0x7d, 0x7, 0xa1, 0x45, 0x4f, 0x49, 0x49, 0x21, 0x87, 0x56, 0xab, 0xd5, 0xdd, 0xdd, 0xdd, 0x83, 0x84, 0x16, 0x45, 0xf1, 0xcf, 0x3f, 0xff, 0xa4, 0x1d, 0xbc, 0xbc, 0xbc, 0x5c, 0x49, 0x57, 0x79, 0x18, 0x3d, 0x29, 0x7c, 0xfd, 0xf5, 0xd7, 0x39, 0x6f, 0x33, 0x7c, 0xf5, 0xd5, 0x57, 0xe4, 0x6d, 0x25, 0x2a, 0x2a, 0x2a, 0x2e, 0x5f, 0xbe, 0x4c, 0xe, 0x6d, 0x30, 0x18, 0x46, 0x8e, 0x1c, 0x39, 0x48, 0x68, 0x95, 0x4a, 0x95, 0x9e, 0x9e, 0x4e, 0xdb, 0x45, 0x65, 0xef, 0xde, 0xbd, 0x78, 0x52, 0x18, 0x7e, 0x98, 0xcd, 0x66, 0xce, 0xc3, 0xb9, 0x19, 0x33, 0x66, 0x70, 0xa2, 0xa7, 0xa6, 0xa6, 0x72, 0xd2, 0xb3, 0xd3, 0xe9, 0x1c, 0x4a, 0x94, 0x7b, 0xef, 0xbd, 0x97, 0x70, 0xfc, 0xe4, 0xe4, 0x64, 0x64, 0xe8, 0xf0, 0x63, 0xda, 0xb4, 0x69, 0xe4, 0x2d, 0x6, 0x55, 0x2a, 0x55, 0x6d, 0x6d, 0x2d, 0x39, 0xbb, 0x97, 0x95, 0x95, 0x71, 0x26, 0x4e, 0x18, 0xc, 0x86, 0x21, 0x2e, 0x2c, 0x46, 0xdb, 0xf, 0xbc, 0xb3, 0xb3, 0x93, 0xf3, 0xeb, 0x81, 0xc, 0x1d, 0x4, 0x98, 0xbf, 0xaa, 0xcf, 0x3e, 0xfb, 0x2c, 0x39, 0xb4, 0xdb, 0xed, 0xe6, 0x6c, 0x92, 0x12, 0x13, 0x13, 0x33, 0xf4, 0x3b, 0x4, 0x72, 0x19, 0xbd, 0x77, 0xef, 0x5e, 0xdc, 0x14, 0x86, 0x13, 0x9, 0x9, 0x9, 0x9c, 0x59, 0x75, 0x9c, 0xd0, 0xab, 0x56, 0xad, 0xe2, 0x7c, 0x97, 0x8e, 0x1f, 0x3f, 0xee, 0xd7, 0x6d, 0x68, 0x7c, 0x7c, 0x3c, 0x21, 0xca, 0xbc, 0x79, 0xf3, 0x20, 0x74, 0xd8, 0x50, 0x58, 0x58, 0xc8, 0x51, 0x6a, 0xdf, 0xbe, 0x7d, 0xe4, 0x91, 0x8d, 0xb, 0x17, 0x2e, 0x70, 0x42, 0xbf, 0xfd, 0xf6, 0xdb, 0xfe, 0x46, 0x24, 0x4f, 0x7b, 0x72, 0xb9, 0x5c, 0x10, 
0x3a, 0xc, 0xe8, 0xec, 0xec, 0xe4, 0x28, 0xb5, 0x6a, 0xd5, 0x2a, 0x4e, 0xf4, 0x99, 0x33, 0x67, 0x92, 0x43, 0x6b, 0x34, 0x9a, 0x21, 0xde, 0xb, 0xfe, 0x1b, 0xf2, 0x2a, 0xc0, 0x1d, 0x1d, 0x1d, 0x10, 0x3a, 0xc, 0x28, 0x2e, 0x2e, 0xe6, 0x8, 0xdd, 0xd9, 0xd9, 0x19, 0xac, 0xef, 0x52, 0x71, 0x71, 0x31, 0x21, 0x28, 0x79, 0x7d, 0x82, 0x6f, 0xbf, 0xfd, 0x16, 0x42, 0x87, 0x3a, 0x57, 0xae, 0x5c, 0xe1, 0x28, 0xe5, 0x5b, 0x47, 0x94, 0x86, 0xc7, 0xe3, 0xe1, 0xbc, 0x61, 0xa5, 0xd5, 0x6a, 0x3d, 0x1e, 0xf, 0xad, 0xd4, 0xd1, 0xe9, 0x74, 0x84, 0x88, 0xf, 0x3c, 0xf0, 0x0, 0x84, 0xe, 0x75, 0x38, 0xf3, 0xe8, 0x23, 0x23, 0x23, 0xed, 0x76, 0x3b, 0xb9, 0x7a, 0x5e, 0xbd, 0x7a, 0x35, 0xe7, 0xbb, 0x54, 0x55, 0x55, 0x45, 0xfe, 0xd4, 0x33, 0x66, 0xcc, 0xa0, 0x5, 0xed, 0xeb, 0xeb, 0x83, 0xd0, 0xa1, 0x4b, 0x53, 0x53, 0x13, 0x47, 0x29, 0xdf, 0x3b, 0xd5, 0x34, 0x7a, 0x7a, 0x7a, 0x38, 0xa1, 0x27, 0x4f, 0x9e, 0x4c, 0xe, 0x2d, 0x8a, 0x62, 0x55, 0x55, 0x15, 0x2d, 0xee, 0xce, 0x9d, 0x3b, 0x21, 0x74, 0x88, 0xe2, 0x74, 0x3a, 0x39, 0x43, 0x75, 0x19, 0x19, 0x19, 0x9c, 0x69, 0x1b, 0x59, 0x59, 0x59, 0xe4, 0xd0, 0x2a, 0x95, 0xaa, 0xa5, 0xa5, 0x85, 0x1c, 0xda, 0x7, 0xed, 0x99, 0xe8, 0xb8, 0x71, 0xe3, 0x14, 0x70, 0xe9, 0x95, 0xf9, 0xa4, 0x30, 0x3b, 0x3b, 0xfb, 0xea, 0xd5, 0xab, 0xe4, 0xe6, 0xbe, 0x1d, 0x2f, 0x69, 0xd3, 0x36, 0x4c, 0x26, 0x53, 0x43, 0x43, 0x3, 0x39, 0xf4, 0xdc, 0xb9, 0x73, 0xd3, 0xd3, 0xd3, 0x39, 0x93, 0xa8, 0x4, 0x41, 0xa0, 0xd5, 0x5a, 0xe4, 0xe7, 0x32, 0x78, 0x52, 0x28, 0x7b, 0x7a, 0x56, 0xab, 0xd5, 0xe4, 0xe, 0x61, 0x2e, 0x58, 0x41, 0x2e, 0x61, 0x7d, 0x98, 0xcd, 0x66, 0x7e, 0xf, 0x90, 0xb7, 0xa0, 0xb5, 0x58, 0x2c, 0xc8, 0xd0, 0x21, 0x47, 0x4e, 0x4e, 0x8e, 0xc7, 0xe3, 0x21, 0x37, 0xf7, 0x4d, 0x7a, 0x26, 0x3f, 0xd8, 0xe3, 0xbc, 0x3, 0x9b, 0x97, 0x97, 0x97, 0x98, 0x98, 0xc8, 0xef, 0x1, 0xf2, 0xb6, 0x18, 0xcc, 0xbd, 0xcd, 0x91, 0xa1, 0xa5, 0xa7, 0xae, 0xae, 0x8e, 0xd3, 0x1b, 0xbe, 0x49, 0xcf, 0xe4, 0x1b, 0xb2, 0x5b, 0x6f, 0xbd, 0x95, 0x1c, 0x7a, 0xf4, 0xe8, 0xd1, 0x12, 0xf6, 0x3, 0xad, 0x68, 0xc9, 0xcc, 0xcc, 0xc4, 0x4d, 0x61, 0x68, 0x91, 0x91, 0x91, 0x41, 0x56, 0x2a, 0x29, 0x29, 0x89, 0xb3, 0x3, 0x2c, 0xf9, 0x9d, 0x11, 0x1f, 0xad, 0xad, 0xad, 0x12, 0xf6, 0xc3, 0x53, 0x4f, 0x3d, 0x45, 0x38, 0x87, 0xe8, 0xe8, 0x68, 0x8, 0x1d, 0x42, 0xec, 0xda, 0xb5, 0x8b, 0xa3, 0xd4, 0x99, 0x33, 0x67, 0xc8, 0xa1, 0x6d, 0x36, 0x1b, 0x27, 0xf4, 0x33, 0xcf, 0x3c, 0x23, 0x6d, 0x57, 0x9c, 0x3c, 0x79, 0x92, 0x76, 0x26, 0x83, 0xbf, 0xb9, 0x8, 0xa1, 0x3, 0x7, 0xf9, 0xcd, 0x67, 0x1f, 0x73, 0xe6, 0xcc, 0x9, 0x7c, 0x46, 0x1c, 0xa0, 0xa7, 0xa7, 0x47, 0xda, 0xde, 0x20, 0x7f, 0xc1, 0x56, 0xae, 0x5c, 0x9, 0xa1, 0x43, 0x82, 0xea, 0xea, 0x6a, 0x8e, 0x52, 0x8d, 0x8d, 0x8d, 0xe4, 0xd0, 0xcd, 0xcd, 0xcd, 0x9c, 0xd0, 0x4b, 0x96, 0x2c, 0x91, 0xbc, 0x37, 0x3c, 0x1e, 0xf, 0xed, 0x19, 0x78, 0x56, 0x56, 0x16, 0x84, 0xe, 0x3e, 0xcc, 0x85, 0x71, 0x99, 0xe9, 0x39, 0x36, 0x36, 0x96, 0x1c, 0x5a, 0xad, 0x56, 0x73, 0x9e, 0xb1, 0xf, 0xc2, 0xf3, 0xcf, 0x3f, 0x4f, 0x38, 0x9f, 0xa8, 0xa8, 0x28, 0xc, 0xdb, 0x5, 0x9f, 0x17, 0x5f, 0x7c, 0x91, 0xf3, 0x70, 0x6e, 0xd7, 0xae, 0x5d, 0xe4, 0xa1, 0xba, 0x6d, 0xdb, 0xb6, 0x91, 0x37, 0x49, 0x11, 0x4, 0x61, 0xfd, 0xfa, 0xf5, 0xd1, 0xd1, 0xd1, 0xcc, 0x27, 0x29, 0xd7, 0xa5, 0xa0, 0xa0, 0x80, 0xd0, 0xca, 0xe9, 0x74, 0xfe, 0xf2, 0xcb, 0x2f, 0x18, 0xb6, 0xb, 0x26, 0xcc, 0x2d, 0x70, 0xde, 0x78, 0xe3, 0xd, 0x72, 0x68, 0x97, 0xcb, 0x35, 0xc4, 0x17, 0xfe, 0x6e, 0x34, 0xae, 0x22, 0x5f, 0xb7, 0x58, 0xad, 0x56, 0xda, 0x59, 0xad, 0x59, 0xb3, 0x6, 0x25, 0x47, 0x30, 0xe1, 0xfc, 0xe2, 0xc7, 0xc7, 0xc7, 0x73, 0xa6, 0x6d, 0x2c, 0x5e, 0xbc, 0x98, 0xf3, 0x5d, 
0x32, 0x99, 0x4c, 0x72, 0x14, 0x1b, 0x3, 0x64, 0x66, 0x66, 0x92, 0xa7, 0xb2, 0xa0, 0xe4, 0x8, 0x5a, 0xb1, 0xc1, 0xf9, 0xc5, 0xff, 0xf2, 0xcb, 0x2f, 0xc9, 0xd3, 0x36, 0x5a, 0x5a, 0x5a, 0x38, 0xab, 0x7b, 0x4d, 0x9c, 0x38, 0x71, 0xe6, 0xcc, 0x99, 0x72, 0x14, 0x1b, 0x3, 0xd0, 0x56, 0xd, 0x6e, 0x69, 0x69, 0x21, 0x67, 0x77, 0x94, 0x1c, 0xc1, 0x4c, 0xcf, 0x1a, 0x8d, 0x86, 0x13, 0x9a, 0xb9, 0x61, 0xe6, 0xef, 0xbf, 0xff, 0x2e, 0x77, 0xe7, 0x1c, 0x3c, 0x78, 0x90, 0x76, 0x6e, 0xd, 0xd, 0xd, 0xc8, 0xd0, 0x41, 0xe0, 0xb5, 0xd7, 0x5e, 0xe3, 0xa4, 0xe7, 0x23, 0x47, 0x8e, 0x90, 0xef, 0x5, 0xcf, 0x9d, 0x3b, 0xf7, 0xf9, 0xe7, 0x9f, 0x93, 0x43, 0x6f, 0xde, 0xbc, 0x79, 0xd2, 0xa4, 0x49, 0x72, 0xf7, 0xf, 0xf9, 0x15, 0x7, 0xa3, 0xd1, 0x88, 0xc, 0x1d, 0x68, 0xda, 0xdb, 0xdb, 0x39, 0x1f, 0x7c, 0xdd, 0xba, 0x75, 0xe4, 0xd0, 0xa2, 0x28, 0x4e, 0x9d, 0x3a, 0x95, 0x1c, 0x3a, 0x2e, 0x2e, 0x8e, 0xb3, 0xc8, 0xb4, 0x5f, 0x8c, 0x1f, 0x3f, 0x9e, 0x70, 0x86, 0x69, 0x69, 0x69, 0xb8, 0x29, 0xc, 0x34, 0x9c, 0x5d, 0x2b, 0xa3, 0xa3, 0xa3, 0x39, 0x2f, 0xee, 0x1f, 0x3d, 0x7a, 0x94, 0xf3, 0x5d, 0xa, 0xe4, 0x32, 0xe3, 0xaf, 0xbc, 0xf2, 0xa, 0xed, 0x24, 0xad, 0x56, 0x2b, 0x84, 0xe, 0x1c, 0xe4, 0xb9, 0xa, 0x3e, 0xf6, 0xef, 0xdf, 0x4f, 0xe, 0xdd, 0xdd, 0xdd, 0xcd, 0xd9, 0x70, 0xe8, 0xfe, 0xfb, 0xef, 0xe7, 0x8c, 0xab, 0xf8, 0xfb, 0x4b, 0x42, 0x7e, 0x15, 0xed, 0xa7, 0x9f, 0x7e, 0x82, 0xd0, 0x1, 0xa2, 0xbf, 0xbf, 0x9f, 0xb3, 0xbe, 0xd6, 0xdd, 0x77, 0xdf, 0xcd, 0x51, 0x6a, 0xc2, 0x84, 0x9, 0x9c, 0xef, 0xd2, 0xf9, 0xf3, 0xe7, 0x3, 0xdc, 0x5d, 0xb4, 0x55, 0x49, 0x17, 0x2d, 0x5a, 0x14, 0x8e, 0x6e, 0xa8, 0x38, 0x2b, 0xcc, 0x2, 0x10, 0x6a, 0x44, 0xa0, 0xb, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0xca, 0xe7, 0x7f, 0x1, 0x0, 0x0, 0xff, 0xff, 0x1a, 0xd5, 0xb5, 0x9c, 0xcd, 0x97, 0x3e, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82}
diff --git a/tools/debug/doberman/main.go b/tools/debug/doberman/main.go
index eb8eb447b..70899f3c8 100644
--- a/tools/debug/doberman/main.go
+++ b/tools/debug/doberman/main.go
@@ -23,7 +23,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"strings"
@@ -57,7 +56,7 @@ func main() {
}
// write logo
- tf, err := ioutil.TempFile("", "algorand-logo.png")
+ tf, err := os.CreateTemp("", "algorand-logo.png")
if err != nil {
panic(err)
}
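
This doberman hunk is the same mechanical cleanup applied across this PR: Go 1.16 deprecated io/ioutil, and os.CreateTemp is the drop-in replacement for ioutil.TempFile. A minimal self-contained sketch of the new call (the deferred cleanup is added here for illustration and is not part of the diff):

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.CreateTemp has the same (dir, pattern) signature and the
	// same *os.File result as the deprecated ioutil.TempFile.
	tf, err := os.CreateTemp("", "algorand-logo.png")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tf.Name()) // caller is still responsible for cleanup
	fmt.Println("temp file:", tf.Name())
}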
diff --git a/tools/debug/dumpblocks/main.go b/tools/debug/dumpblocks/main.go
new file mode 100644
index 000000000..09698bdb0
--- /dev/null
+++ b/tools/debug/dumpblocks/main.go
@@ -0,0 +1,122 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "database/sql"
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "time"
+
+ _ "github.com/mattn/go-sqlite3"
+)
+
+var blockDBfile = flag.String("blockdb", "", "Block DB filename")
+var numBlocks = flag.Int("numblocks", 10000, "Randomly sample this many blocks for training")
+var startRound = flag.Int("start", 0, "Sample blocks starting at this round (inclusive)")
+var endRound = flag.Int("end", 0, "Sample blocks ending at this round (inclusive)")
+var outDir = flag.String("outdir", ".", "Write blocks to this directory")
+var randSeed = flag.Int("seed", 0, "Random seed, otherwise will use time")
+
+func getBlockToFile(db *sql.DB, rnd int64) error {
+ var buf []byte
+ err := db.QueryRow("SELECT blkdata FROM blocks WHERE rnd=?", rnd).Scan(&buf)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(fmt.Sprintf("%s/%d.block", *outDir, rnd), buf, 0644)
+}
+
+func usage() {
+ flag.Usage()
+ os.Exit(1)
+}
+
+func main() {
+ flag.Parse()
+ if *blockDBfile == "" {
+ fmt.Println("-blockdb=file required")
+ usage()
+ }
+ uri := fmt.Sprintf("file:%s?mode=ro", *blockDBfile)
+ fmt.Println("Opening", uri)
+ db, err := sql.Open("sqlite3", uri)
+ if err != nil {
+ panic(err)
+ }
+ defer db.Close()
+
+ err = db.Ping()
+ if err != nil {
+ panic(err)
+ }
+
+ seed := int64(*randSeed)
+ if seed == 0 {
+ seed = time.Now().UnixMicro()
+ }
+ rand.Seed(seed)
+
+ var minRound, maxRound int64
+ if *startRound != 0 {
+ minRound = int64(*startRound)
+ }
+ if *endRound != 0 {
+ maxRound = int64(*endRound)
+ }
+ if maxRound == 0 {
+ err = db.QueryRow("SELECT MAX(rnd) FROM blocks").Scan(&maxRound)
+ if err != nil {
+ panic(err)
+ }
+ }
+ if minRound == 0 {
+ err := db.QueryRow("SELECT MIN(rnd) FROM blocks").Scan(&minRound)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ N := maxRound - minRound
+ if N <= 0 {
+ panic("maxRound must be greater than minRound")
+ }
+
+ if N <= int64(*numBlocks) {
+ // just get all blocks from minRound to maxRound
+ fmt.Printf("Saving all blocks between round %d and %d\n", minRound, maxRound)
+ for i := minRound; i <= maxRound; i++ {
+ err = getBlockToFile(db, i)
+ if err != nil {
+ panic(err)
+ }
+
+ }
+ os.Exit(0)
+ }
+
+ fmt.Printf("Loading %d random blocks between round %d and %d\n", *numBlocks, minRound, maxRound)
+ for i := 0; i < *numBlocks; i++ {
+ round := minRound + rand.Int63n(N) + 1
+ err = getBlockToFile(db, round)
+ if err != nil {
+ panic(err)
+ }
+ }
+}
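
The new dumpblocks tool either dumps every block in the requested range or samples rounds uniformly at random. Note the +1 offset on the random draw: sampled rounds fall in (minRound, maxRound], so minRound itself is only written on the take-everything path. A self-contained sketch of that selection logic, condensed from the file above:

package main

import (
	"fmt"
	"math/rand"
)

// sampleRounds mirrors the branch logic in dumpblocks' main: take every
// round when the range is small, otherwise draw numBlocks random rounds.
func sampleRounds(minRound, maxRound int64, numBlocks int) []int64 {
	n := maxRound - minRound
	if n <= int64(numBlocks) {
		all := make([]int64, 0, int(n)+1)
		for i := minRound; i <= maxRound; i++ {
			all = append(all, i) // inclusive of both endpoints
		}
		return all
	}
	picks := make([]int64, numBlocks)
	for i := range picks {
		picks[i] = minRound + rand.Int63n(n) + 1 // in (minRound, maxRound]
	}
	return picks
}

func main() {
	fmt.Println(sampleRounds(100, 105, 10)) // small range: all six rounds
	fmt.Println(sampleRounds(0, 1000000, 5))
}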
diff --git a/tools/debug/logfilter/main_test.go b/tools/debug/logfilter/main_test.go
index 9058f611c..45ab8605f 100644
--- a/tools/debug/logfilter/main_test.go
+++ b/tools/debug/logfilter/main_test.go
@@ -19,7 +19,6 @@ package main
import (
"bytes"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -48,7 +47,7 @@ func TestLogFilterExamples(t *testing.T) {
for _, exampleFileName := range exampleFiles {
// load the expected result file.
expectedOutFile := strings.Replace(exampleFileName, ".in", ".out.expected", 1)
- expectedOutBytes, err := ioutil.ReadFile(expectedOutFile)
+ expectedOutBytes, err := os.ReadFile(expectedOutFile)
require.NoError(t, err)
expectedErrorCode := 0
if strings.Contains(string(expectedOutBytes), "FAIL") {
diff --git a/tools/network/cloudflare/cloudflare.go b/tools/network/cloudflare/cloudflare.go
index 414f81232..714fb9635 100644
--- a/tools/network/cloudflare/cloudflare.go
+++ b/tools/network/cloudflare/cloudflare.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"strings"
)
@@ -170,7 +170,7 @@ func (d *DNS) CreateDNSRecord(ctx context.Context, recordType string, name strin
if !parsedResponse.Success {
request, _ := createDNSRecordRequest(d.zoneID, d.authToken, recordType, name, content, ttl, priority, proxied)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to create DNS record. Request url = '%v', body = %s, parsed response : %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -195,7 +195,7 @@ func (d *DNS) CreateSRVRecord(ctx context.Context, name string, target string, t
if !parsedResponse.Success {
request, _ := createSRVRecordRequest(d.zoneID, d.authToken, name, service, protocol, weight, port, ttl, priority, target)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to create SRV record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -220,7 +220,7 @@ func (d *DNS) DeleteDNSRecord(ctx context.Context, recordID string) error {
if !parsedResponse.Success {
request, _ := deleteDNSRecordRequest(d.zoneID, d.authToken, recordID)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to delete DNS record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -246,7 +246,7 @@ func (d *DNS) UpdateDNSRecord(ctx context.Context, recordID string, recordType s
if !parsedResponse.Success {
request, _ := updateDNSRecordRequest(d.zoneID, d.authToken, recordID, recordType, name, content, ttl, priority, proxied)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to update DNS record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
@@ -272,7 +272,7 @@ func (d *DNS) UpdateSRVRecord(ctx context.Context, recordID string, name string,
if !parsedResponse.Success {
request, _ := updateSRVRecordRequest(d.zoneID, d.authToken, recordID, name, service, protocol, weight, port, ttl, priority, target)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to update SRV record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -303,7 +303,7 @@ func (c *Cred) GetZones(ctx context.Context) (zones []Zone, err error) {
if !parsedResponse.Success {
request, _ := getZonesRequest(c.authToken)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return nil, fmt.Errorf("failed to retrieve zone records. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
@@ -360,7 +360,7 @@ func (d *DNS) ExportZone(ctx context.Context) (exportedZoneBytes []byte, err err
return nil, err
}
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
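
Every cloudflare call site in this file changes the same way: io.ReadAll is the direct replacement for the deprecated ioutil.ReadAll. A condensed sketch of the pattern (the URL is a placeholder, not from the diff):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// io.ReadAll reads the body to EOF, exactly as ioutil.ReadAll did.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(body), "bytes")
}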
diff --git a/tools/network/cloudflare/createRecord.go b/tools/network/cloudflare/createRecord.go
index 747dc57ae..c68747f5b 100644
--- a/tools/network/cloudflare/createRecord.go
+++ b/tools/network/cloudflare/createRecord.go
@@ -20,7 +20,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -144,7 +144,7 @@ type CreateDNSRecordResult struct {
// parseCreateDNSRecordResponse parses the response that was received as a result of a ListDNSRecordRequest
func parseCreateDNSRecordResponse(response *http.Response) (*CreateDNSRecordResponse, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/network/cloudflare/deleteRecord.go b/tools/network/cloudflare/deleteRecord.go
index 9770be898..f0bf90ce5 100644
--- a/tools/network/cloudflare/deleteRecord.go
+++ b/tools/network/cloudflare/deleteRecord.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -55,7 +55,7 @@ type DeleteDNSRecordResult struct {
// ParseDeleteDNSRecordResponse parses the response that was received as a result of a ListDNSRecordRequest
func parseDeleteDNSRecordResponse(response *http.Response) (*DeleteDNSRecordResponse, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/network/cloudflare/listRecords.go b/tools/network/cloudflare/listRecords.go
index 263e8adf8..1617b6118 100644
--- a/tools/network/cloudflare/listRecords.go
+++ b/tools/network/cloudflare/listRecords.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -120,7 +120,7 @@ type DNSRecordResponseEntry struct {
// parseListDNSRecordResponse parses the response that was received as a result of a ListDNSRecordRequest
func parseListDNSRecordResponse(response *http.Response) (*ListDNSRecordResponse, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/network/cloudflare/zones.go b/tools/network/cloudflare/zones.go
index d73829ea5..f5aa4b9ac 100644
--- a/tools/network/cloudflare/zones.go
+++ b/tools/network/cloudflare/zones.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -71,7 +71,7 @@ type GetZonesResultItem struct {
func parseGetZonesResponse(response *http.Response) (*GetZonesResult, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/teal/algotmpl/main.go b/tools/teal/algotmpl/main.go
index 2d24841a5..7568dda92 100644
--- a/tools/teal/algotmpl/main.go
+++ b/tools/teal/algotmpl/main.go
@@ -20,7 +20,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -107,7 +106,7 @@ type param struct {
}
func initCommandsFromDir(dirname string) error {
- files, err := ioutil.ReadDir(dirname)
+ files, err := os.ReadDir(dirname)
if err != nil {
return err
}
@@ -137,7 +136,7 @@ func initCommandsFromDir(dirname string) error {
if err != nil {
return err
}
- data, err := ioutil.ReadFile(fullpath)
+ data, err := os.ReadFile(fullpath)
if err != nil {
return err
}
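
Unlike the other one-for-one swaps in this PR, ioutil.ReadDir to os.ReadDir changes the element type: the old call returned []os.FileInfo, while os.ReadDir returns the cheaper []os.DirEntry. That works here because the caller only needs names and directory checks; a sketch of the distinction (the "." path is illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // []os.DirEntry, not []os.FileInfo
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		// Name and IsDir come straight off the DirEntry; size and
		// mod time would now need an extra e.Info() call.
		fmt.Println(e.Name(), e.IsDir())
	}
}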
diff --git a/tools/teal/dkey/dsign/main.go b/tools/teal/dkey/dsign/main.go
index ff8a6067b..6f380d8a0 100644
--- a/tools/teal/dkey/dsign/main.go
+++ b/tools/teal/dkey/dsign/main.go
@@ -23,7 +23,7 @@ package main
import (
"encoding/base64"
"fmt"
- "io/ioutil"
+ "io"
"os"
"github.com/algorand/go-algorand/crypto"
@@ -47,7 +47,7 @@ func main() {
keyfname := os.Args[1]
lsigfname := os.Args[2]
- kdata, err := ioutil.ReadFile(keyfname)
+ kdata, err := os.ReadFile(keyfname)
failFast(err)
var seed crypto.Seed
copy(seed[:], kdata)
@@ -56,10 +56,10 @@ func main() {
if len(os.Args) == 4 {
// In this mode, interpret lsig-file as raw program bytes and produce a signature
// over the data file
- pdata, err := ioutil.ReadFile(lsigfname)
+ pdata, err := os.ReadFile(lsigfname)
failFast(err)
- ddata, err := ioutil.ReadFile(os.Args[3])
+ ddata, err := os.ReadFile(os.Args[3])
failFast(err)
dsig := sec.Sign(logic.Msg{
@@ -71,13 +71,13 @@ func main() {
} else {
// In this mode, interpret lsig-file as a LogicSig struct and sign the
// txid of the transaction passed over stdin
- pdata, err := ioutil.ReadFile(lsigfname)
+ pdata, err := os.ReadFile(lsigfname)
failFast(err)
var lsig transactions.LogicSig
err = protocol.Decode(pdata, &lsig)
failFast(err)
- txdata, err := ioutil.ReadAll(os.Stdin)
+ txdata, err := io.ReadAll(os.Stdin)
failFast(err)
var txn transactions.SignedTxn
err = protocol.Decode(txdata, &txn)
diff --git a/tools/teal/tealcut/main.go b/tools/teal/tealcut/main.go
index 96958c42a..7615cdc97 100644
--- a/tools/teal/tealcut/main.go
+++ b/tools/teal/tealcut/main.go
@@ -22,7 +22,6 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
- "io/ioutil"
"os"
"strconv"
"strings"
@@ -56,7 +55,7 @@ func main() {
}
var splitbytes [8]byte
binary.BigEndian.PutUint64(splitbytes[:], splitnum)
- data, err := ioutil.ReadFile(os.Args[1])
+ data, err := os.ReadFile(os.Args[1])
if err != nil {
panic(err)
}
diff --git a/util/codecs/json.go b/util/codecs/json.go
index 071c2f30c..2d2c21134 100644
--- a/util/codecs/json.go
+++ b/util/codecs/json.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"reflect"
"strings"
@@ -79,7 +78,7 @@ func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface
// When done, ensure last value line doesn't include comma
// Write string array to file.
- file, err := ioutil.TempFile("", "encsndv")
+ file, err := os.CreateTemp("", "encsndv")
if err != nil {
return err
}
@@ -94,7 +93,7 @@ func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface
}
// Read lines from encoded file into string array
- content, err := ioutil.ReadFile(name)
+ content, err := os.ReadFile(name)
if err != nil {
return err
}
diff --git a/util/db/dbutil_test.go b/util/db/dbutil_test.go
index 5454f73ba..412f04a6c 100644
--- a/util/db/dbutil_test.go
+++ b/util/db/dbutil_test.go
@@ -238,6 +238,10 @@ func cleanupSqliteDb(t *testing.T, path string) {
func TestDBConcurrencyRW(t *testing.T) {
partitiontest.PartitionTest(t)
+ if testing.Short() {
+	// This test is long-running and exercises only the db package, so it is safe to skip when running the short test suite.
+ t.Skip("skipped as part of short test suite")
+ }
dbFolder := "/dev/shm"
os := runtime.GOOS
@@ -492,6 +496,11 @@ func testLockingTableWhileWriting(t *testing.T, useWAL bool) {
partitiontest.PartitionTest(t)
a := require.New(t)
+ if testing.Short() {
+	// This test is long-running and exercises only the db package, so it is safe to skip when running the short test suite.
+ t.Skip("skipped as part of short test suite")
+ }
+
dbParams := []string{"_secure_delete=on"} // not required but used in ErasableAccessor, so I'd like it to be tested here as well
if useWAL {
dbParams = []string{"_secure_delete=on", "_journal_mode=wal"}
diff --git a/util/io.go b/util/io.go
index 081c1d568..43068f39c 100644
--- a/util/io.go
+++ b/util/io.go
@@ -19,7 +19,6 @@ package util
import (
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -83,7 +82,7 @@ func ExeDir() (string, error) {
// GetFirstLineFromFile retrieves the first line of the specified file.
func GetFirstLineFromFile(netFile string) (string, error) {
- addrStr, err := ioutil.ReadFile(netFile)
+ addrStr, err := os.ReadFile(netFile)
if err != nil {
return "", err
}
@@ -130,7 +129,7 @@ func copyFolder(source string, dest string, info os.FileInfo, includeFilter Incl
return fmt.Errorf("error creating destination folder: %v", err)
}
- contents, err := ioutil.ReadDir(source)
+ contents, err := os.ReadDir(source)
if err != nil {
return err
}
diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go
index 01f161888..f9965d001 100644
--- a/util/metrics/metrics.go
+++ b/util/metrics/metrics.go
@@ -49,6 +49,8 @@ var (
DuplicateNetworkMessageReceivedTotal = MetricName{Name: "algod_network_duplicate_message_received_total", Description: "Total number of duplicate messages that were received from the network"}
// DuplicateNetworkMessageReceivedBytesTotal The total number, in bytes, of the duplicate messages that were received from the network
DuplicateNetworkMessageReceivedBytesTotal = MetricName{Name: "algod_network_duplicate_message_received_bytes_total", Description: "The total number, in bytes, of the duplicate messages that were received from the network"}
+ // DuplicateNetworkFilterReceivedTotal Total number of duplicate filter messages (tag MsgDigestSkipTag) that were received from the network
+ DuplicateNetworkFilterReceivedTotal = MetricName{Name: "algod_network_duplicate_filter_received_total", Description: "Total number of duplicate filter messages that were received from the network"}
// OutgoingNetworkMessageFilteredOutTotal Total number of messages that were not sent per peer request
OutgoingNetworkMessageFilteredOutTotal = MetricName{Name: "algod_outgoing_network_message_filtered_out_total", Description: "Total number of messages that were not sent per peer request"}
// OutgoingNetworkMessageFilteredOutBytesTotal Total number of bytes saved by not sending messages that were asked not to be sent by peer
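
The hunk above only declares the new MetricName; how it gets wired into a live counter is not part of this diff. A hedged sketch, assuming the MakeCounter constructor and Inc method that similar counters in util/metrics use (the variable and function names here are hypothetical):

package metrics

// duplicateFilterReceived counts repeated MsgDigestSkipTag messages.
var duplicateFilterReceived = MakeCounter(DuplicateNetworkFilterReceivedTotal)

// onDuplicateFilterMessage would be called from the network layer each
// time a filter message arrives more than once (nil means no labels).
func onDuplicateFilterMessage() {
	duplicateFilterReceived.Inc(nil)
}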
diff --git a/util/metrics/metrics_test.go b/util/metrics/metrics_test.go
index fddb9eda6..2e2828b1f 100644
--- a/util/metrics/metrics_test.go
+++ b/util/metrics/metrics_test.go
@@ -18,7 +18,7 @@ package metrics
import (
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"strings"
@@ -65,7 +65,7 @@ func (p *MetricTest) createListener(endpoint string) int {
func (p *MetricTest) testMetricsHandler(w http.ResponseWriter, r *http.Request) {
// read the entire request:
- body, err := ioutil.ReadAll(r.Body)
+ body, err := io.ReadAll(r.Body)
if err != nil {
return
}
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
index d4de0d53c..80689e7a8 100644
--- a/util/metrics/tagcounter.go
+++ b/util/metrics/tagcounter.go
@@ -104,16 +104,13 @@ func (tc *TagCounter) Add(tag string, val uint64) {
var st []uint64
if len(tc.storage) > 0 {
st = tc.storage[len(tc.storage)-1]
- //fmt.Printf("new tag %v, old block\n", tag)
}
if tc.storagePos > (len(st) - 1) {
- //fmt.Printf("new tag %v, new block\n", tag)
st = make([]uint64, 16)
tc.storagePos = 0
tc.storage = append(tc.storage, st)
}
newtags[tag] = &(st[tc.storagePos])
- //fmt.Printf("tag %v = %p\n", tag, newtags[tag])
tc.storagePos++
tc.tags = newtags
tc.tagptr.Store(newtags)
@@ -155,7 +152,8 @@ func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) {
buf.WriteRune('}')
}
buf.WriteRune(' ')
- buf.WriteString(strconv.FormatUint(*tagcount, 10))
+ count := atomic.LoadUint64(tagcount)
+ buf.WriteString(strconv.FormatUint(count, 10))
buf.WriteRune('\n')
}
}
@@ -179,6 +177,7 @@ func (tc *TagCounter) AddMetric(values map[string]float64) {
} else {
name = tc.Name + "_" + tag
}
- values[sanitizeTelemetryName(name)] = float64(*tagcount)
+ count := atomic.LoadUint64(tagcount)
+ values[sanitizeTelemetryName(name)] = float64(count)
}
}
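
The two tagcounter fixes replace plain dereferences of the per-tag counter pointers with atomic loads, so metric readers no longer race against concurrent atomic increments from Add. A self-contained sketch of the reader/writer pairing:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var count uint64

	// Writer side: the increment path already used atomics.
	atomic.AddUint64(&count, 3)

	// Reader side: WriteMetric and AddMetric now load atomically too;
	// a plain read through the pointer would trip the race detector.
	snapshot := atomic.LoadUint64(&count)
	fmt.Println(snapshot)
}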
diff --git a/util/sleep_linux_32.go b/util/sleep_linux_32.go
index 1d155fac0..50a0e696c 100644
--- a/util/sleep_linux_32.go
+++ b/util/sleep_linux_32.go
@@ -31,5 +31,5 @@ func NanoSleep(d time.Duration) {
Nsec: int32(d.Nanoseconds() % time.Second.Nanoseconds()),
Sec: int32(d.Nanoseconds() / time.Second.Nanoseconds()),
}
- syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck // ignoring error
}
diff --git a/util/sleep_linux_64.go b/util/sleep_linux_64.go
index 2897ceaa1..b2f7a69db 100644
--- a/util/sleep_linux_64.go
+++ b/util/sleep_linux_64.go
@@ -30,5 +30,5 @@ func NanoSleep(d time.Duration) {
Nsec: d.Nanoseconds() % time.Second.Nanoseconds(),
Sec: d.Nanoseconds() / time.Second.Nanoseconds(),
}
- syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck // ignoring error
}
diff --git a/util/tokens/tokens.go b/util/tokens/tokens.go
index 7b6d33d47..930f030bb 100644
--- a/util/tokens/tokens.go
+++ b/util/tokens/tokens.go
@@ -19,7 +19,7 @@ package tokens
import (
"crypto/rand"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"github.com/algorand/go-algorand/util"
@@ -59,7 +59,7 @@ func GetAndValidateAPIToken(dataDir, tokenFilename string) (string, error) {
// writeAPITokenToDisk persists the APIToken to the datadir
func writeAPITokenToDisk(dataDir, tokenFilename, apiToken string) error {
filepath := tokenFilepath(dataDir, tokenFilename)
- return ioutil.WriteFile(filepath, []byte(apiToken), 0644)
+ return os.WriteFile(filepath, []byte(apiToken), 0644)
}
// GenerateAPIToken writes a cryptographically secure APIToken to disk
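
The tokens change is the final ioutil removal in this PR: os.WriteFile keeps the same (name, data, perm) signature as ioutil.WriteFile, including the 0644 mode. A minimal sketch (the path and token contents are placeholders):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := os.TempDir() // placeholder for the node data directory
	path := filepath.Join(dir, "algod.token")
	if err := os.WriteFile(path, []byte("hex-api-token"), 0644); err != nil {
		panic(err)
	}
	fmt.Println("wrote", path)
}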